[lxc-devel] [lxd/master] Moves containerLXC and instance interface to instance package

tomponline on Github lxc-bot at linuxcontainers.org
Wed Sep 25 11:18:04 UTC 2019


From f268ff3ad399c13164b5d74405cced3f28c891f2 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 10:57:32 +0100
Subject: [PATCH 01/72] lxd/daemon: Adds daemon package

This moves some daemon-level settings and functions to a separate package so that other packages inside LXD can use them.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/daemon/daemon_loglevel.go       |  4 +++
 lxd/daemon/daemon_share_mounts.go   | 45 +++++++++++++++++++++++++++++
 lxd/daemon/daemon_smart_response.go | 10 +++++++
 3 files changed, 59 insertions(+)
 create mode 100644 lxd/daemon/daemon_loglevel.go
 create mode 100644 lxd/daemon/daemon_share_mounts.go
 create mode 100644 lxd/daemon/daemon_smart_response.go

diff --git a/lxd/daemon/daemon_loglevel.go b/lxd/daemon/daemon_loglevel.go
new file mode 100644
index 0000000000..fda83ecccc
--- /dev/null
+++ b/lxd/daemon/daemon_loglevel.go
@@ -0,0 +1,4 @@
+package daemon
+
+var Debug bool
+var Verbose bool
diff --git a/lxd/daemon/daemon_share_mounts.go b/lxd/daemon/daemon_share_mounts.go
new file mode 100644
index 0000000000..3603ac7e5f
--- /dev/null
+++ b/lxd/daemon/daemon_share_mounts.go
@@ -0,0 +1,45 @@
+package daemon
+
+import (
+	"sync"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/lxc/lxd/shared"
+)
+
+// Have we already set up the shared mounts?
+var sharedMounted bool
+var sharedMountsLock sync.Mutex
+
+func SetupSharedMounts() error {
+	// Get a lock to prevent races
+	sharedMountsLock.Lock()
+	defer sharedMountsLock.Unlock()
+
+	// Check if we already went through this (under the lock to avoid a data race)
+	if sharedMounted {
+		return nil
+	}
+
+	// Check if already setup
+	path := shared.VarPath("shmounts")
+	if shared.IsMountPoint(path) {
+		sharedMounted = true
+		return nil
+	}
+
+	// Mount a new tmpfs
+	if err := unix.Mount("tmpfs", path, "tmpfs", 0, "size=100k,mode=0711"); err != nil {
+		return err
+	}
+
+	// Mark as MS_SHARED and MS_REC
+	var flags uintptr = unix.MS_SHARED | unix.MS_REC
+	if err := unix.Mount(path, path, "none", flags, ""); err != nil {
+		return err
+	}
+
+	sharedMounted = true
+	return nil
+}
diff --git a/lxd/daemon/daemon_smart_response.go b/lxd/daemon/daemon_smart_response.go
new file mode 100644
index 0000000000..a8a3a47eaa
--- /dev/null
+++ b/lxd/daemon/daemon_smart_response.go
@@ -0,0 +1,10 @@
+package daemon
+
+import (
+	"net/http"
+)
+
+type Response interface {
+	Render(w http.ResponseWriter) error
+	String() string
+}
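
For illustration (an editorial sketch, not part of the patch): a consumer of the
new package might use the exported flags and Response interface like this. The
textResponse type below is hypothetical.

    package main

    import (
        "fmt"
        "net/http"

        "github.com/lxc/lxd/lxd/daemon"
    )

    // textResponse is a hypothetical daemon.Response implementation.
    type textResponse struct {
        body string
    }

    func (r *textResponse) Render(w http.ResponseWriter) error {
        _, err := fmt.Fprint(w, r.body)
        return err
    }

    func (r *textResponse) String() string {
        return r.body
    }

    func main() {
        // The daemon package now exposes its log level flags directly.
        daemon.Verbose = true

        // Compile-time check that textResponse satisfies daemon.Response.
        var _ daemon.Response = &textResponse{body: "ok"}

        fmt.Println("sketch only")
    }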

From 2a7dba817f6b094113af784cf4e770a22f610c23 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 10:58:55 +0100
Subject: [PATCH 02/72] lxd/device/device/utils/cpu: Adds CPU rebalancing
 functions

Moves these from the main package so they can be accessed by other packages.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/device/device_utils_cpu.go | 83 ++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 lxd/device/device_utils_cpu.go

diff --git a/lxd/device/device_utils_cpu.go b/lxd/device/device_utils_cpu.go
new file mode 100644
index 0000000000..45843ff928
--- /dev/null
+++ b/lxd/device/device_utils_cpu.go
@@ -0,0 +1,83 @@
+package device
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// DeviceSchedRebalance channel for scheduling a CPU rebalance.
+var DeviceSchedRebalance = make(chan []string, 2)
+
+// TaskSchedulerTrigger triggers a CPU rebalance.
+func TaskSchedulerTrigger(srcType string, srcName string, srcStatus string) {
+	// Attempt a non-blocking send to the rebalance channel
+	select {
+	case DeviceSchedRebalance <- []string{srcType, srcName, srcStatus}:
+	default:
+		// Channel is full, drop the event
+	}
+}
+
+// ParseCPU parses CPU allowances.
+func ParseCPU(cpuAllowance string, cpuPriority string) (string, string, string, error) {
+	var err error
+
+	// Parse priority
+	cpuShares := 0
+	cpuPriorityInt := 10
+	if cpuPriority != "" {
+		cpuPriorityInt, err = strconv.Atoi(cpuPriority)
+		if err != nil {
+			return "", "", "", err
+		}
+	}
+	cpuShares -= 10 - cpuPriorityInt
+
+	// Parse allowance
+	cpuCfsQuota := "-1"
+	cpuCfsPeriod := "100000"
+
+	if cpuAllowance != "" {
+		if strings.HasSuffix(cpuAllowance, "%") {
+			// Percentage based allocation
+			percent, err := strconv.Atoi(strings.TrimSuffix(cpuAllowance, "%"))
+			if err != nil {
+				return "", "", "", err
+			}
+
+			cpuShares += (10 * percent) + 24
+		} else {
+			// Time based allocation
+			fields := strings.SplitN(cpuAllowance, "/", 2)
+			if len(fields) != 2 {
+				return "", "", "", fmt.Errorf("Invalid allowance: %s", cpuAllowance)
+			}
+
+			quota, err := strconv.Atoi(strings.TrimSuffix(fields[0], "ms"))
+			if err != nil {
+				return "", "", "", err
+			}
+
+			period, err := strconv.Atoi(strings.TrimSuffix(fields[1], "ms"))
+			if err != nil {
+				return "", "", "", err
+			}
+
+			// Set limit in ms
+			cpuCfsQuota = fmt.Sprintf("%d", quota*1000)
+			cpuCfsPeriod = fmt.Sprintf("%d", period*1000)
+			cpuShares += 1024
+		}
+	} else {
+		// Default is 100%
+		cpuShares += 1024
+	}
+
+	// Deal with a potential negative score
+	if cpuShares < 0 {
+		cpuShares = 0
+	}
+
+	return fmt.Sprintf("%d", cpuShares), cpuCfsQuota, cpuCfsPeriod, nil
+}
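
A worked example of what ParseCPU returns for the two allowance formats (values
derived from the code above; the standalone program is illustrative only):

    package main

    import (
        "fmt"

        "github.com/lxc/lxd/lxd/device"
    )

    func main() {
        // Percentage allocation: 50% at the default priority of 10.
        // cpuShares = 0 + (10 * 50) + 24 = 524, no CFS quota.
        shares, quota, period, _ := device.ParseCPU("50%", "")
        fmt.Println(shares, quota, period) // 524 -1 100000

        // Time based allocation: 25ms out of every 100ms.
        // The CFS quota and period are converted to microseconds.
        shares, quota, period, _ = device.ParseCPU("25ms/100ms", "")
        fmt.Println(shares, quota, period) // 1024 25000 100000
    }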

From d4a903334d0c4892d75bedcd40474283b6838ab1 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 10:59:26 +0100
Subject: [PATCH 03/72] lxd/operation: Adds operation package

Moves LXD operation functions out of the main package so they can be accessed by other packages.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/operation/operation.go | 463 +++++++++++++++++++++++++++++++++++++
 1 file changed, 463 insertions(+)
 create mode 100644 lxd/operation/operation.go

diff --git a/lxd/operation/operation.go b/lxd/operation/operation.go
new file mode 100644
index 0000000000..0053492ff6
--- /dev/null
+++ b/lxd/operation/operation.go
@@ -0,0 +1,463 @@
+package operation
+
+import (
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/pborman/uuid"
+	"github.com/pkg/errors"
+
+	"github.com/lxc/lxd/lxd/daemon"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/cancel"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/lxc/lxd/shared/version"
+)
+
+var EventSend func(project, eventType string, eventMessage interface{}) error
+var SmartError func(err error) daemon.Response
+
+type OperationClass int
+
+const (
+	OperationClassTask      OperationClass = 1
+	OperationClassWebsocket OperationClass = 2
+	OperationClassToken     OperationClass = 3
+)
+
+var OperationsLock sync.Mutex
+var Operations map[string]*Operation = make(map[string]*Operation)
+
+func (t OperationClass) String() string {
+	return map[OperationClass]string{
+		OperationClassTask:      "task",
+		OperationClassWebsocket: "websocket",
+		OperationClassToken:     "token",
+	}[t]
+}
+
+type Operation struct {
+	Project     string
+	ID          string
+	class       OperationClass
+	createdAt   time.Time
+	updatedAt   time.Time
+	Status      api.StatusCode
+	URL         string
+	Resources   map[string][]string
+	Metadata    map[string]interface{}
+	err         string
+	readonly    bool
+	Canceler    *cancel.Canceler
+	description string
+	Permission  string
+
+	// These functions are called at various points in the operation lifecycle
+	onRun     func(*Operation) error
+	onCancel  func(*Operation) error
+	onConnect func(*Operation, *http.Request, http.ResponseWriter) error
+
+	// Channels used for error reporting and state tracking of background actions
+	chanDone chan error
+
+	// Locking for concurrent access to the operation
+	lock sync.Mutex
+
+	cluster *db.Cluster
+}
+
+func OperationCreate(cluster *db.Cluster, project string, opClass OperationClass, opType db.OperationType, opResources map[string][]string, opMetadata interface{}, onRun func(*Operation) error, onCancel func(*Operation) error, onConnect func(*Operation, *http.Request, http.ResponseWriter) error) (*Operation, error) {
+	// Main attributes
+	op := Operation{}
+	op.Project = project
+	op.ID = uuid.NewRandom().String()
+	op.description = opType.Description()
+	op.Permission = opType.Permission()
+	op.class = opClass
+	op.createdAt = time.Now()
+	op.updatedAt = op.createdAt
+	op.Status = api.Pending
+	op.URL = fmt.Sprintf("/%s/operations/%s", version.APIVersion, op.ID)
+	op.Resources = opResources
+	op.chanDone = make(chan error)
+	op.cluster = cluster
+
+	newMetadata, err := shared.ParseMetadata(opMetadata)
+	if err != nil {
+		return nil, err
+	}
+	op.Metadata = newMetadata
+
+	// Callback functions
+	op.onRun = onRun
+	op.onCancel = onCancel
+	op.onConnect = onConnect
+
+	// Sanity check
+	if op.class != OperationClassWebsocket && op.onConnect != nil {
+		return nil, fmt.Errorf("Only websocket operations can have a Connect hook")
+	}
+
+	if op.class == OperationClassWebsocket && op.onConnect == nil {
+		return nil, fmt.Errorf("Websocket operations must have a Connect hook")
+	}
+
+	if op.class == OperationClassToken && op.onRun != nil {
+		return nil, fmt.Errorf("Token operations can't have a Run hook")
+	}
+
+	if op.class == OperationClassToken && op.onCancel != nil {
+		return nil, fmt.Errorf("Token operations can't have a Cancel hook")
+	}
+
+	OperationsLock.Lock()
+	Operations[op.ID] = &op
+	OperationsLock.Unlock()
+
+	err = op.cluster.Transaction(func(tx *db.ClusterTx) error {
+		_, err := tx.OperationAdd(project, op.ID, opType)
+		return err
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to add operation %s to database", op.ID)
+	}
+
+	logger.Debugf("New %s operation: %s", op.class.String(), op.ID)
+	_, md, _ := op.Render()
+	EventSend(op.Project, "operation", md)
+
+	return &op, nil
+}
+
+func OperationGetInternal(id string) (*Operation, error) {
+	OperationsLock.Lock()
+	op, ok := Operations[id]
+	OperationsLock.Unlock()
+
+	if !ok {
+		return nil, fmt.Errorf("Operation '%s' doesn't exist", id)
+	}
+
+	return op, nil
+}
+
+func (op *Operation) UpdateMetadata(opMetadata interface{}) error {
+	if op.Status != api.Pending && op.Status != api.Running {
+		return fmt.Errorf("Only pending or running operations can be updated")
+	}
+
+	if op.readonly {
+		return fmt.Errorf("Read-only operations can't be updated")
+	}
+
+	newMetadata, err := shared.ParseMetadata(opMetadata)
+	if err != nil {
+		return err
+	}
+
+	op.lock.Lock()
+	op.updatedAt = time.Now()
+	op.Metadata = newMetadata
+	op.lock.Unlock()
+
+	logger.Debugf("Updated metadata for %s operation: %s", op.class.String(), op.ID)
+	_, md, _ := op.Render()
+	EventSend(op.Project, "operation", md)
+
+	return nil
+}
+
+func (op *Operation) Render() (string, *api.Operation, error) {
+	// Setup the resource URLs
+	resources := op.Resources
+	if resources != nil {
+		tmpResources := make(map[string][]string)
+		for key, value := range resources {
+			var values []string
+			for _, c := range value {
+				values = append(values, fmt.Sprintf("/%s/%s/%s", version.APIVersion, key, c))
+			}
+			tmpResources[key] = values
+		}
+		resources = tmpResources
+	}
+
+	// Local server name
+	var err error
+	var serverName string
+	err = op.cluster.Transaction(func(tx *db.ClusterTx) error {
+		serverName, err = tx.NodeName()
+		return err
+	})
+	if err != nil {
+		return "", nil, err
+	}
+
+	return op.URL, &api.Operation{
+		ID:          op.ID,
+		Class:       op.class.String(),
+		Description: op.description,
+		CreatedAt:   op.createdAt,
+		UpdatedAt:   op.updatedAt,
+		Status:      op.Status.String(),
+		StatusCode:  op.Status,
+		Resources:   resources,
+		Metadata:    op.Metadata,
+		MayCancel:   op.mayCancel(),
+		Err:         op.err,
+		Location:    serverName,
+	}, nil
+}
+
+func (op *Operation) mayCancel() bool {
+	if op.class == OperationClassToken {
+		return true
+	}
+
+	if op.onCancel != nil {
+		return true
+	}
+
+	if op.Canceler != nil && op.Canceler.Cancelable() {
+		return true
+	}
+
+	return false
+}
+
+func (op *Operation) WaitFinal(timeout int) (bool, error) {
+	// Check current state
+	if op.Status.IsFinal() {
+		return true, nil
+	}
+
+	// Wait indefinitely
+	if timeout == -1 {
+		<-op.chanDone
+		return true, nil
+	}
+
+	// Wait until timeout
+	if timeout > 0 {
+		timer := time.NewTimer(time.Duration(timeout) * time.Second)
+		select {
+		case <-op.chanDone:
+			return true, nil
+
+		case <-timer.C:
+			return false, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (op *Operation) UpdateResources(opResources map[string][]string) error {
+	if op.Status != api.Pending && op.Status != api.Running {
+		return fmt.Errorf("Only pending or running operations can be updated")
+	}
+
+	if op.readonly {
+		return fmt.Errorf("Read-only operations can't be updated")
+	}
+
+	op.lock.Lock()
+	op.updatedAt = time.Now()
+	op.Resources = opResources
+	op.lock.Unlock()
+
+	logger.Debugf("Updated resources for %s operation: %s", op.class.String(), op.ID)
+	_, md, _ := op.Render()
+	EventSend(op.Project, "operation", md)
+
+	return nil
+}
+
+func (op *Operation) done() {
+	if op.readonly {
+		return
+	}
+
+	op.lock.Lock()
+	op.readonly = true
+	op.onRun = nil
+	op.onCancel = nil
+	op.onConnect = nil
+	close(op.chanDone)
+	op.lock.Unlock()
+
+	time.AfterFunc(time.Second*5, func() {
+		OperationsLock.Lock()
+		_, ok := Operations[op.ID]
+		if !ok {
+			OperationsLock.Unlock()
+			return
+		}
+
+		delete(Operations, op.ID)
+		OperationsLock.Unlock()
+
+		err := op.cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.OperationRemove(op.ID)
+		})
+		if err != nil {
+			logger.Warnf("Failed to delete operation %s: %s", op.ID, err)
+		}
+	})
+}
+
+func (op *Operation) Run() (chan error, error) {
+	if op.Status != api.Pending {
+		return nil, fmt.Errorf("Only pending operations can be started")
+	}
+
+	chanRun := make(chan error, 1)
+
+	op.lock.Lock()
+	op.Status = api.Running
+
+	if op.onRun != nil {
+		go func(op *Operation, chanRun chan error) {
+			err := op.onRun(op)
+			if err != nil {
+				op.lock.Lock()
+				op.Status = api.Failure
+				op.err = SmartError(err).String()
+				op.lock.Unlock()
+				op.done()
+				chanRun <- err
+
+				logger.Debugf("Failure for %s operation: %s: %s", op.class.String(), op.ID, err)
+
+				_, md, _ := op.Render()
+				EventSend(op.Project, "operation", md)
+				return
+			}
+
+			op.lock.Lock()
+			op.Status = api.Success
+			op.lock.Unlock()
+			op.done()
+			chanRun <- nil
+
+			op.lock.Lock()
+			logger.Debugf("Success for %s operation: %s", op.class.String(), op.ID)
+			_, md, _ := op.Render()
+			EventSend(op.Project, "operation", md)
+			op.lock.Unlock()
+		}(op, chanRun)
+	}
+	op.lock.Unlock()
+
+	logger.Debugf("Started %s operation: %s", op.class.String(), op.ID)
+	_, md, _ := op.Render()
+	EventSend(op.Project, "operation", md)
+
+	return chanRun, nil
+}
+
+func (op *Operation) Cancel() (chan error, error) {
+	if op.Status != api.Running {
+		return nil, fmt.Errorf("Only running operations can be cancelled")
+	}
+
+	if !op.mayCancel() {
+		return nil, fmt.Errorf("This operation can't be cancelled")
+	}
+
+	chanCancel := make(chan error, 1)
+
+	op.lock.Lock()
+	oldStatus := op.Status
+	op.Status = api.Cancelling
+	op.lock.Unlock()
+
+	if op.onCancel != nil {
+		go func(op *Operation, oldStatus api.StatusCode, chanCancel chan error) {
+			err := op.onCancel(op)
+			if err != nil {
+				op.lock.Lock()
+				op.Status = oldStatus
+				op.lock.Unlock()
+				chanCancel <- err
+
+				logger.Debugf("Failed to cancel %s operation: %s: %s", op.class.String(), op.ID, err)
+				_, md, _ := op.Render()
+				EventSend(op.Project, "operation", md)
+				return
+			}
+
+			op.lock.Lock()
+			op.Status = api.Cancelled
+			op.lock.Unlock()
+			op.done()
+			chanCancel <- nil
+
+			logger.Debugf("Cancelled %s operation: %s", op.class.String(), op.ID)
+			_, md, _ := op.Render()
+			EventSend(op.Project, "operation", md)
+		}(op, oldStatus, chanCancel)
+	}
+
+	logger.Debugf("Cancelling %s operation: %s", op.class.String(), op.ID)
+	_, md, _ := op.Render()
+	EventSend(op.Project, "operation", md)
+
+	if op.Canceler != nil {
+		err := op.Canceler.Cancel()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if op.onCancel == nil {
+		op.lock.Lock()
+		op.Status = api.Cancelled
+		op.lock.Unlock()
+		op.done()
+		chanCancel <- nil
+	}
+
+	logger.Debugf("Cancelled %s operation: %s", op.class.String(), op.ID)
+	_, md, _ = op.Render()
+	EventSend(op.Project, "operation", md)
+
+	return chanCancel, nil
+}
+
+func (op *Operation) Connect(r *http.Request, w http.ResponseWriter) (chan error, error) {
+	if op.class != OperationClassWebsocket {
+		return nil, fmt.Errorf("Only websocket operations can be connected")
+	}
+
+	if op.Status != api.Running {
+		return nil, fmt.Errorf("Only running operations can be connected")
+	}
+
+	chanConnect := make(chan error, 1)
+
+	op.lock.Lock()
+
+	go func(op *Operation, chanConnect chan error) {
+		err := op.onConnect(op, r, w)
+		if err != nil {
+			chanConnect <- err
+
+			logger.Debugf("Failed to handle %s operation: %s: %s", op.class.String(), op.ID, err)
+			return
+		}
+
+		chanConnect <- nil
+
+		logger.Debugf("Handled %s operation: %s", op.class.String(), op.ID)
+	}(op, chanConnect)
+	op.lock.Unlock()
+
+	logger.Debugf("Connected %s operation: %s", op.class.String(), op.ID)
+
+	return chanConnect, nil
+}
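
A rough sketch of driving the new package from a caller (illustrative only): the
cluster handle and opType are placeholders, and the EventSend and SmartError
package hooks must be assigned by the daemon first, since they are nil by default.

    package main

    import (
        "fmt"

        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/lxd/operation"
    )

    // startTask is a hypothetical caller that runs a task operation to
    // completion and returns the result of its run hook.
    func startTask(cluster *db.Cluster, opType db.OperationType) error {
        op, err := operation.OperationCreate(cluster, "default", operation.OperationClassTask,
            opType, nil, nil, func(op *operation.Operation) error {
                // Long-running work goes here.
                return nil
            }, nil, nil)
        if err != nil {
            return err
        }

        chanRun, err := op.Run()
        if err != nil {
            return err
        }

        // Block until the run hook reports back.
        return <-chanRun
    }

    func main() {
        fmt.Println("sketch only; needs a real *db.Cluster")
    }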

From ebba7af9260bee63b8f7700c4acef0fd47edeb59 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:00:20 +0100
Subject: [PATCH 04/72] lxd/instance/instancetype: Adds instancetype package

This is to avoid circular references with the forthcoming instance package and the db package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/instancetype/instance_type.go | 52 ++++++++++++++++++++++
 1 file changed, 52 insertions(+)
 create mode 100644 lxd/instance/instancetype/instance_type.go

diff --git a/lxd/instance/instancetype/instance_type.go b/lxd/instance/instancetype/instance_type.go
new file mode 100644
index 0000000000..2d821babbd
--- /dev/null
+++ b/lxd/instance/instancetype/instance_type.go
@@ -0,0 +1,52 @@
+package instancetype
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/shared/api"
+)
+
+// Type indicates the type of instance.
+type Type int
+
+const (
+	// Any represents any type of instance.
+	Any = Type(-1)
+
+	// Container represents a container instance type.
+	Container = Type(0)
+
+	// VM represents a virtual-machine instance type.
+	VM = Type(1)
+)
+
+// New validates the supplied string against the allowed types of instance and returns the internal
+// representation of that type. If empty string is supplied then the type returned is Container.
+// If an invalid name is supplied an error will be returned.
+func New(name string) (Type, error) {
+	// If "container" or "" is supplied, return type as Container.
+	if api.InstanceType(name) == api.InstanceTypeContainer || name == "" {
+		return Container, nil
+	}
+
+	// If "virtual-machine" is supplied, return type as VM.
+	if api.InstanceType(name) == api.InstanceTypeVM {
+		return VM, nil
+	}
+
+	return -1, fmt.Errorf("Invalid instance type")
+}
+
+// String converts the internal representation of instance type to a string used in API requests.
+// Returns empty string if value is not a valid instance type.
+func (instanceType Type) String() string {
+	if instanceType == Container {
+		return string(api.InstanceTypeContainer)
+	}
+
+	if instanceType == VM {
+		return string(api.InstanceTypeVM)
+	}
+
+	return ""
+}
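
A quick sketch of the round trip this gives callers (illustrative only):

    package main

    import (
        "fmt"

        "github.com/lxc/lxd/lxd/instance/instancetype"
    )

    func main() {
        // An empty string defaults to Container.
        t, err := instancetype.New("")
        fmt.Println(t == instancetype.Container, err) // true <nil>

        // "virtual-machine" maps to VM and back.
        t, err = instancetype.New("virtual-machine")
        fmt.Println(t.String(), err) // virtual-machine <nil>

        // Anything else is rejected.
        _, err = instancetype.New("bare-metal")
        fmt.Println(err) // Invalid instance type
    }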

From 0a870b9ec99eb374d7c590d754e6b0f21b27e6b4 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:02:59 +0100
Subject: [PATCH 05/72] lxd/instance/instance/storage: Adds storage interface
 and types to instance package

This is so it can be used by other packages in LXD.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/instance_storage.go | 177 +++++++++++++++++++++++++++++++
 1 file changed, 177 insertions(+)
 create mode 100644 lxd/instance/instance_storage.go

diff --git a/lxd/instance/instance_storage.go b/lxd/instance/instance_storage.go
new file mode 100644
index 0000000000..6da76728a6
--- /dev/null
+++ b/lxd/instance/instance_storage.go
@@ -0,0 +1,177 @@
+package instance
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/gorilla/websocket"
+
+	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/ioprogress"
+)
+
+// Storage interface defines the functions needed to implement a storage
+// backend for a given storage driver.
+type Storage interface {
+	// Functions dealing with basic driver properties only.
+	StorageCoreInit() error
+	GetStorageType() StorageType
+	GetStorageTypeName() string
+	GetStorageTypeVersion() string
+	GetState() *state.State
+
+	// Functions dealing with storage pools.
+	StoragePoolInit() error
+	StoragePoolCheck() error
+	StoragePoolCreate() error
+	StoragePoolDelete() error
+	StoragePoolMount() (bool, error)
+	StoragePoolUmount() (bool, error)
+	StoragePoolResources() (*api.ResourcesStoragePool, error)
+	StoragePoolUpdate(writable *api.StoragePoolPut, changedConfig []string) error
+	GetStoragePoolWritable() api.StoragePoolPut
+	SetStoragePoolWritable(writable *api.StoragePoolPut)
+	GetStoragePool() *api.StoragePool
+
+	// Functions dealing with custom storage volumes.
+	StoragePoolVolumeCreate() error
+	StoragePoolVolumeDelete() error
+	StoragePoolVolumeMount() (bool, error)
+	StoragePoolVolumeUmount() (bool, error)
+	StoragePoolVolumeUpdate(writable *api.StorageVolumePut, changedConfig []string) error
+	StoragePoolVolumeRename(newName string) error
+	StoragePoolVolumeCopy(source *api.StorageVolumeSource) error
+	GetStoragePoolVolumeWritable() api.StorageVolumePut
+	SetStoragePoolVolumeWritable(writable *api.StorageVolumePut)
+	GetStoragePoolVolume() *api.StorageVolume
+
+	// Functions dealing with custom storage volume snapshots.
+	StoragePoolVolumeSnapshotCreate(target *api.StorageVolumeSnapshotsPost) error
+	StoragePoolVolumeSnapshotDelete() error
+	StoragePoolVolumeSnapshotRename(newName string) error
+
+	// Functions dealing with container storage volumes.
+	// ContainerCreate creates an empty container (no rootfs/metadata.yaml)
+	ContainerCreate(container Instance) error
+
+	// ContainerCreateFromImage creates a container from an image.
+	ContainerCreateFromImage(c Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error
+	ContainerDelete(c Instance) error
+	ContainerCopy(target Instance, source Instance, containerOnly bool) error
+	ContainerRefresh(target Instance, source Instance, snapshots []Instance) error
+	ContainerMount(c Instance) (bool, error)
+	ContainerUmount(c Instance, path string) (bool, error)
+	ContainerRename(container Instance, newName string) error
+	ContainerRestore(container Instance, sourceContainer Instance) error
+	ContainerGetUsage(container Instance) (int64, error)
+	GetContainerPoolInfo() (int64, string, string)
+	ContainerStorageReady(container Instance) bool
+
+	ContainerSnapshotCreate(target Instance, source Instance) error
+	ContainerSnapshotDelete(c Instance) error
+	ContainerSnapshotRename(c Instance, newName string) error
+	ContainerSnapshotStart(c Instance) (bool, error)
+	ContainerSnapshotStop(c Instance) (bool, error)
+
+	ContainerBackupCreate(backup Backup, sourceContainer Instance) error
+	ContainerBackupLoad(info BackupInfo, data io.ReadSeeker, tarArgs []string) error
+
+	// For use in migrating snapshots.
+	ContainerSnapshotCreateEmpty(c Instance) error
+
+	// Functions dealing with image storage volumes.
+	ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error
+	ImageDelete(fingerprint string) error
+
+	// Storage type agnostic functions.
+	StorageEntitySetQuota(volumeType int, size int64, data interface{}) error
+
+	// Functions dealing with migration.
+	MigrationType() migration.MigrationFSType
+	// Does this storage backend preserve inodes when it is moved across LXD
+	// hosts?
+	PreservesInodes() bool
+
+	// Get the pieces required to migrate the source. This contains a list
+	// of the "object" (i.e. container or snapshot, depending on whether or
+	// not it is a snapshot name) to be migrated in order, and a channel
+	// for arguments of the specific migration command. We use a channel
+	// here so we don't have to invoke `zfs send` or `rsync` or whatever
+	// and keep its stdin/stdout open for each snapshot during the course
+	// of migration; instead, we can do it lazily.
+	//
+	// N.B. that the order here is important: e.g. in btrfs/zfs, snapshots
+	// which are parents of other snapshots should be sent first, to save
+	// as much transfer as possible. However, the base container is always
+	// sent as the first object, since that is the grandparent of every
+	// snapshot.
+	//
+	// We leave sending containers which are snapshots of other containers
+	// already present on the target instance as an exercise for the
+	// enterprising developer.
+	MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error)
+	MigrationSink(conn *websocket.Conn, op *operation.Operation, args MigrationSinkArgs) error
+
+	StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error)
+	StorageMigrationSink(conn *websocket.Conn, op *operation.Operation, args MigrationSinkArgs) error
+}
+
+// StorageType defines the type of a storage backend.
+type StorageType int
+
+const (
+	StorageTypeBtrfs StorageType = iota
+	StorageTypeCeph
+	StorageTypeCephFs
+	StorageTypeDir
+	StorageTypeLvm
+	StorageTypeMock
+	StorageTypeZfs
+)
+
+var SupportedStoragePoolDrivers = []string{"btrfs", "ceph", "cephfs", "dir", "lvm", "zfs"}
+
+func StorageTypeToString(sType StorageType) (string, error) {
+	switch sType {
+	case StorageTypeBtrfs:
+		return "btrfs", nil
+	case StorageTypeCeph:
+		return "ceph", nil
+	case StorageTypeCephFs:
+		return "cephfs", nil
+	case StorageTypeDir:
+		return "dir", nil
+	case StorageTypeLvm:
+		return "lvm", nil
+	case StorageTypeMock:
+		return "mock", nil
+	case StorageTypeZfs:
+		return "zfs", nil
+	}
+
+	return "", fmt.Errorf("invalid storage type")
+}
+
+func StorageStringToType(sName string) (StorageType, error) {
+	switch sName {
+	case "btrfs":
+		return StorageTypeBtrfs, nil
+	case "ceph":
+		return StorageTypeCeph, nil
+	case "cephfs":
+		return StorageTypeCephFs, nil
+	case "dir":
+		return StorageTypeDir, nil
+	case "lvm":
+		return StorageTypeLvm, nil
+	case "mock":
+		return StorageTypeMock, nil
+	case "zfs":
+		return StorageTypeZfs, nil
+	}
+
+	return -1, fmt.Errorf("invalid storage type name")
+}
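
For illustration, validating a driver name with the exported helpers might look
like this (sketch only):

    package main

    import (
        "fmt"

        "github.com/lxc/lxd/lxd/instance"
    )

    func main() {
        // Validate a user-supplied driver name against the known types.
        sType, err := instance.StorageStringToType("zfs")
        if err != nil {
            fmt.Println("unsupported driver:", err)
            return
        }

        // And convert back to its canonical name.
        name, _ := instance.StorageTypeToString(sType)
        fmt.Println(name) // zfs
    }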

From 24f5f81a9d7ac47bb7143651895844a54f93238f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:04:04 +0100
Subject: [PATCH 06/72] lxd/instance/instance/seccomp: Adds seccomp functions
 and types to instance package

So they can be used by other packages outside of the main package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/instance_seccomp.go | 325 +++++++++++++++++++++++++++++++
 1 file changed, 325 insertions(+)
 create mode 100644 lxd/instance/instance_seccomp.go

diff --git a/lxd/instance/instance_seccomp.go b/lxd/instance/instance_seccomp.go
new file mode 100644
index 0000000000..f6e2f0ee3e
--- /dev/null
+++ b/lxd/instance/instance_seccomp.go
@@ -0,0 +1,325 @@
+package instance
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/osarch"
+)
+
+const SECCOMP_HEADER = `2
+`
+
+const DEFAULT_SECCOMP_POLICY = `reject_force_umount  # comment this to allow umount -f;  not recommended
+[all]
+kexec_load errno 38
+open_by_handle_at errno 38
+init_module errno 38
+finit_module errno 38
+delete_module errno 38
+`
+
+const SECCOMP_NOTIFY_MKNOD = `mknod notify [1,8192,SCMP_CMP_MASKED_EQ,61440]
+mknod notify [1,24576,SCMP_CMP_MASKED_EQ,61440]
+mknodat notify [2,8192,SCMP_CMP_MASKED_EQ,61440]
+mknodat notify [2,24576,SCMP_CMP_MASKED_EQ,61440]
+`
+
+const SECCOMP_NOTIFY_SETXATTR = `setxattr notify [3,1,SCMP_CMP_EQ]
+`
+
+const COMPAT_BLOCKING_POLICY = `[%s]
+compat_sys_rt_sigaction errno 38
+stub_x32_rt_sigreturn errno 38
+compat_sys_ioctl errno 38
+compat_sys_readv errno 38
+compat_sys_writev errno 38
+compat_sys_recvfrom errno 38
+compat_sys_sendmsg errno 38
+compat_sys_recvmsg errno 38
+stub_x32_execve errno 38
+compat_sys_ptrace errno 38
+compat_sys_rt_sigpending errno 38
+compat_sys_rt_sigtimedwait errno 38
+compat_sys_rt_sigqueueinfo errno 38
+compat_sys_sigaltstack errno 38
+compat_sys_timer_create errno 38
+compat_sys_mq_notify errno 38
+compat_sys_kexec_load errno 38
+compat_sys_waitid errno 38
+compat_sys_set_robust_list errno 38
+compat_sys_get_robust_list errno 38
+compat_sys_vmsplice errno 38
+compat_sys_move_pages errno 38
+compat_sys_preadv64 errno 38
+compat_sys_pwritev64 errno 38
+compat_sys_rt_tgsigqueueinfo errno 38
+compat_sys_recvmmsg errno 38
+compat_sys_sendmmsg errno 38
+compat_sys_process_vm_readv errno 38
+compat_sys_process_vm_writev errno 38
+compat_sys_setsockopt errno 38
+compat_sys_getsockopt errno 38
+compat_sys_io_setup errno 38
+compat_sys_io_submit errno 38
+stub_x32_execveat errno 38
+`
+
+var SeccompPath = shared.VarPath("security", "seccomp")
+
+func SeccompProfilePath(c Instance) string {
+	return path.Join(SeccompPath, c.Name())
+}
+
+func SeccompContainerNeedsPolicy(c Instance) bool {
+	config := c.ExpandedConfig()
+
+	// Check for text keys
+	keys := []string{
+		"raw.seccomp",
+		"security.syscalls.whitelist",
+		"security.syscalls.blacklist",
+	}
+
+	for _, k := range keys {
+		_, hasKey := config[k]
+		if hasKey {
+			return true
+		}
+	}
+
+	// Check for boolean keys that default to false
+	keys = []string{
+		"security.syscalls.blacklist_compat",
+		"security.syscalls.intercept.mknod",
+		"security.syscalls.intercept.setxattr",
+	}
+
+	for _, k := range keys {
+		if shared.IsTrue(config[k]) {
+			return true
+		}
+	}
+
+	// Check for boolean keys that default to true
+	keys = []string{
+		"security.syscalls.blacklist_default",
+	}
+
+	for _, k := range keys {
+		value, ok := config[k]
+		if !ok || shared.IsTrue(value) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func SeccompContainerNeedsIntercept(c Instance) (bool, error) {
+	// No need if privileged
+	if c.IsPrivileged() {
+		return false, nil
+	}
+
+	// If nested, assume the host handles it
+	if c.DaemonState().OS.RunningInUserNS {
+		return false, nil
+	}
+
+	config := c.ExpandedConfig()
+
+	keys := []string{
+		"security.syscalls.intercept.mknod",
+		"security.syscalls.intercept.setxattr",
+	}
+
+	needed := false
+	for _, k := range keys {
+		if shared.IsTrue(config[k]) {
+			needed = true
+			break
+		}
+	}
+
+	if needed {
+		if !lxcSupportSeccompNotify(c.DaemonState()) {
+			return needed, fmt.Errorf("System doesn't support syscall interception")
+		}
+	}
+
+	return needed, nil
+}
+
+func SeccompCreateProfile(c Instance) error {
+	/* Unlike apparmor, there is no way to "cache" profiles, and profiles
+	 * are automatically unloaded when a task dies. Thus, we don't need to
+	 * unload them when a container stops, and we don't have to worry about
+	 * the mtime on the file for any compiler purpose, so let's just write
+	 * out the profile.
+	 */
+	if !SeccompContainerNeedsPolicy(c) {
+		return nil
+	}
+
+	profile, err := SeccompGetPolicyContent(c)
+	if err != nil {
+		return err
+	}
+
+	if err := os.MkdirAll(SeccompPath, 0700); err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(SeccompProfilePath(c), []byte(profile), 0600)
+}
+
+func SeccompGetPolicyContent(c Instance) (string, error) {
+	config := c.ExpandedConfig()
+
+	// Full policy override
+	raw := config["raw.seccomp"]
+	if raw != "" {
+		return raw, nil
+	}
+
+	// Policy header
+	policy := SECCOMP_HEADER
+	whitelist := config["security.syscalls.whitelist"]
+	if whitelist != "" {
+		policy += "whitelist\n[all]\n"
+		policy += whitelist
+	} else {
+		policy += "blacklist\n"
+
+		default_, ok := config["security.syscalls.blacklist_default"]
+		if !ok || shared.IsTrue(default_) {
+			policy += DEFAULT_SECCOMP_POLICY
+		}
+	}
+
+	// Syscall interception
+	ok, err := SeccompContainerNeedsIntercept(c)
+	if err != nil {
+		return "", err
+	}
+
+	if ok {
+		if shared.IsTrue(config["security.syscalls.intercept.mknod"]) {
+			policy += SECCOMP_NOTIFY_MKNOD
+		}
+
+		if shared.IsTrue(config["security.syscalls.intercept.setxattr"]) {
+			policy += SECCOMP_NOTIFY_SETXATTR
+		}
+	}
+
+	if whitelist != "" {
+		return policy, nil
+	}
+
+	// Additional blacklist entries
+	compat := config["security.syscalls.blacklist_compat"]
+	if shared.IsTrue(compat) {
+		arch, err := osarch.ArchitectureName(c.Architecture())
+		if err != nil {
+			return "", err
+		}
+		policy += fmt.Sprintf(COMPAT_BLOCKING_POLICY, arch)
+	}
+
+	blacklist := config["security.syscalls.blacklist"]
+	if blacklist != "" {
+		policy += blacklist
+	}
+
+	return policy, nil
+}
+
+func SeccompDeleteProfile(c Instance) {
+	/* Similar to AppArmor: if we've never started this container, the
+	 * delete can fail and that's OK.
+	 */
+	os.Remove(SeccompProfilePath(c))
+}
+
+func TaskIDs(pid int) (error, int64, int64, int64, int64) {
+	status, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
+	if err != nil {
+		return err, -1, -1, -1, -1
+	}
+
+	reUid := regexp.MustCompile("Uid:\\s*([0-9]*)\\s*([0-9]*)\\s*([0-9]*)\\s*([0-9]*)")
+	reGid := regexp.MustCompile("Gid:\\s*([0-9]*)\\s*([0-9]*)\\s*([0-9]*)\\s*([0-9]*)")
+	var gid int64 = -1
+	var uid int64 = -1
+	var fsgid int64 = -1
+	var fsuid int64 = -1
+	uidFound := false
+	gidFound := false
+	for _, line := range strings.Split(string(status), "\n") {
+		if uidFound && gidFound {
+			break
+		}
+
+		if !uidFound {
+			m := reUid.FindStringSubmatch(line)
+			if m != nil && len(m) > 2 {
+				// effective uid
+				result, err := strconv.ParseInt(m[2], 10, 64)
+				if err != nil {
+					return err, -1, -1, -1, -1
+				}
+
+				uid = result
+				uidFound = true
+			}
+
+			if m != nil && len(m) > 4 {
+				// fsuid
+				result, err := strconv.ParseInt(m[4], 10, 64)
+				if err != nil {
+					return err, -1, -1, -1, -1
+				}
+
+				fsuid = result
+			}
+
+			continue
+		}
+
+		if !gidFound {
+			m := reGid.FindStringSubmatch(line)
+			if m != nil && len(m) > 2 {
+				// effective gid
+				result, err := strconv.ParseInt(m[2], 10, 64)
+				if err != nil {
+					return err, -1, -1, -1, -1
+				}
+
+				gid = result
+				gidFound = true
+			}
+
+			if m != nil && len(m) > 4 {
+				// fsgid
+				result, err := strconv.ParseInt(m[4], 10, 64)
+				if err != nil {
+					return err, -1, -1, -1, -1
+				}
+
+				fsgid = result
+			}
+
+			continue
+		}
+	}
+
+	return nil, uid, gid, fsuid, fsgid
+}
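
A hedged usage sketch for TaskIDs, reading this process's own /proc entry on a
Linux host (note the error-first return order used above):

    package main

    import (
        "fmt"
        "os"

        "github.com/lxc/lxd/lxd/instance"
    )

    func main() {
        err, uid, gid, fsuid, fsgid := instance.TaskIDs(os.Getpid())
        if err != nil {
            fmt.Println("failed to parse /proc status:", err)
            return
        }

        fmt.Printf("uid=%d gid=%d fsuid=%d fsgid=%d\n", uid, gid, fsuid, fsgid)
    }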

From 8c0d6777265e7acfbda55f05d705178f52fe79fd Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:05:24 +0100
Subject: [PATCH 07/72] lxd/instance/instance/migration: Adds migration types
 to instance package

So they can be used outside of the main package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/instance_migration.go | 68 ++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)
 create mode 100644 lxd/instance/instance_migration.go

diff --git a/lxd/instance/instance_migration.go b/lxd/instance/instance_migration.go
new file mode 100644
index 0000000000..b4019e9a67
--- /dev/null
+++ b/lxd/instance/instance_migration.go
@@ -0,0 +1,68 @@
+package instance
+
+import (
+	"github.com/gorilla/websocket"
+
+	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
+	"github.com/lxc/lxd/shared/idmap"
+)
+
+// MigrationStorageSourceDriver defines the functions needed to implement a
+// migration source driver.
+type MigrationStorageSourceDriver interface {
+	/* send any bits of the container/snapshots that are possible while the
+	 * container is still running.
+	 */
+	SendWhileRunning(conn *websocket.Conn, op *operation.Operation, bwlimit string, containerOnly bool) error
+
+	/* send the final bits (e.g. a final delta snapshot for zfs, btrfs, or
+	 * do a final rsync) of the fs after the container has been
+	 * checkpointed. This will only be called when a container is actually
+	 * being live migrated.
+	 */
+	SendAfterCheckpoint(conn *websocket.Conn, bwlimit string) error
+
+	/* Called after either success or failure of a migration, can be used
+	 * to clean up any temporary snapshots, etc.
+	 */
+	Cleanup()
+
+	SendStorageVolume(conn *websocket.Conn, op *operation.Operation, bwlimit string, storage Storage, volumeOnly bool) error
+}
+
+type MigrationSourceArgs struct {
+	// Instance specific fields
+	Instance     Instance
+	InstanceOnly bool
+
+	// Transport specific fields
+	RsyncFeatures []string
+	ZfsFeatures   []string
+
+	// Volume specific fields
+	VolumeOnly bool
+}
+
+type MigrationSinkArgs struct {
+	// General migration fields
+	Dialer  websocket.Dialer
+	Push    bool
+	Secrets map[string]string
+	Url     string
+
+	// Instance specific fields
+	Instance     Instance
+	InstanceOnly bool
+	Idmap        *idmap.IdmapSet
+	Live         bool
+	Refresh      bool
+	Snapshots    []*migration.Snapshot
+
+	// Storage specific fields
+	Storage    Storage
+	VolumeOnly bool
+
+	// Transport specific fields
+	RsyncFeatures []string
+}
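
For illustration, a pull-mode sink might fill the arguments in roughly this way
(all field values below are placeholders):

    package main

    import (
        "fmt"

        "github.com/gorilla/websocket"

        "github.com/lxc/lxd/lxd/instance"
    )

    func main() {
        args := instance.MigrationSinkArgs{
            Dialer:       websocket.Dialer{},
            Push:         false, // pull mode: the sink dials the source
            Secrets:      map[string]string{"control": "secret1", "fs": "secret2"},
            Url:          "https://source.example.com:8443",
            InstanceOnly: true, // skip snapshots
            Live:         false,
        }

        fmt.Println(args.Url)
    }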

From 02af63c4cd6169094b6d87f615d1d0ad79f8a954 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:05:50 +0100
Subject: [PATCH 08/72] lxd/instance/instance/interface: Adds instance
 interface

This is the common exported interface used for containerLXC and the future vmQemu types.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/instance_interface.go | 119 +++++++++++++++++++++++++++++
 1 file changed, 119 insertions(+)
 create mode 100644 lxd/instance/instance_interface.go

diff --git a/lxd/instance/instance_interface.go b/lxd/instance/instance_interface.go
new file mode 100644
index 0000000000..e75799b674
--- /dev/null
+++ b/lxd/instance/instance_interface.go
@@ -0,0 +1,119 @@
+package instance
+
+import (
+	"io"
+	"os"
+	"os/exec"
+	"time"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/device"
+	"github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/operation"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared/api"
+)
+
+// The Instance interface
+type Instance interface {
+	// Instance actions
+	Freeze() error
+	Shutdown(timeout time.Duration) error
+	Start(stateful bool) error
+	Stop(stateful bool) error
+	Unfreeze() error
+
+	IsPrivileged() bool
+
+	// Snapshots & migration & backups
+	Restore(source Instance, stateful bool) error
+	Snapshots() ([]Instance, error)
+	Backups() ([]Backup, error)
+
+	// Config handling
+	Rename(newName string) error
+
+	// TODO rename db.ContainerArgs to db.InstanceArgs.
+	Update(newConfig db.ContainerArgs, userRequested bool) error
+
+	Delete() error
+	Export(w io.Writer, properties map[string]string) error
+
+	// Live configuration
+	CGroupGet(key string) (string, error)
+	CGroupSet(key string, value string) error
+	VolatileSet(changes map[string]string) error
+
+	// File handling
+	FileExists(path string) error
+	FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error)
+	FilePush(type_ string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error
+	FileRemove(path string) error
+
+	// Console - Allocate and run a console tty.
+	//
+	// terminal  - Bidirectional file descriptor.
+	//
+	// This function will not return until the console has been exited by
+	// the user.
+	Console(terminal *os.File) *exec.Cmd
+	Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, wait bool, cwd string, uid uint32, gid uint32) (*exec.Cmd, int, int, error)
+
+	// Status
+	Render() (interface{}, interface{}, error)
+	RenderFull() (*api.InstanceFull, interface{}, error)
+	RenderState() (*api.InstanceState, error)
+	IsRunning() bool
+	IsFrozen() bool
+	IsEphemeral() bool
+	IsSnapshot() bool
+	IsStateful() bool
+
+	// Hooks
+	DeviceEventHandler(*device.RunConfig) error
+
+	// Properties
+	Id() int
+	Location() string
+	Project() string
+	Name() string
+	Type() instancetype.Type
+	Description() string
+	Architecture() int
+	CreationDate() time.Time
+	LastUsedDate() time.Time
+	ExpandedConfig() map[string]string
+	ExpandedDevices() config.Devices
+	LocalConfig() map[string]string
+	LocalDevices() config.Devices
+	Profiles() []string
+	InitPID() int
+	State() string
+	ExpiryDate() time.Time
+
+	// Paths
+	Path() string
+	RootfsPath() string
+	TemplatesPath() string
+	StatePath() string
+	LogFilePath() string
+	ConsoleBufferLogPath() string
+	LogPath() string
+	DevicesPath() string
+
+	// Storage
+	StoragePool() (string, error)
+
+	// Progress reporting
+
+	SetOperation(op *operation.Operation)
+
+	// FIXME: Those should be internal functions
+	// Needed for migration for now.
+	StorageStart() (bool, error)
+	StorageStop() (bool, error)
+	Storage() Storage
+	TemplateApply(trigger string) error
+	DaemonState() *state.State
+}
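
A small sketch of code written against the interface rather than a concrete
containerLXC (the helper names are made up):

    package main

    import (
        "fmt"
        "time"

        "github.com/lxc/lxd/lxd/instance"
    )

    // describeInstance only needs the interface, not the implementation.
    func describeInstance(inst instance.Instance) string {
        return fmt.Sprintf("%s/%s (%s, running=%t)",
            inst.Project(), inst.Name(), inst.Type().String(), inst.IsRunning())
    }

    // stopAll shuts instances down gracefully, falling back to a hard stop.
    func stopAll(instances []instance.Instance) {
        for _, inst := range instances {
            if !inst.IsRunning() {
                continue
            }

            if err := inst.Shutdown(30 * time.Second); err != nil {
                _ = inst.Stop(false)
            }
        }
    }

    func main() {
        fmt.Println("sketch only; a real caller gets instances from the daemon")
    }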

From 74a7df515f6f77a2ef54b8dd18bed722607da685 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:06:36 +0100
Subject: [PATCH 09/72] lxd/instance/instance/apparmor: Adds apparmor types to
 instance package

So they can be used outside of the main package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/instance_apparmor.go | 788 ++++++++++++++++++++++++++++++
 1 file changed, 788 insertions(+)
 create mode 100644 lxd/instance/instance_apparmor.go

diff --git a/lxd/instance/instance_apparmor.go b/lxd/instance/instance_apparmor.go
new file mode 100644
index 0000000000..a5b61a1476
--- /dev/null
+++ b/lxd/instance/instance_apparmor.go
@@ -0,0 +1,788 @@
+package instance
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/lxc/lxd/lxd/project"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
+
+	log "github.com/lxc/lxd/shared/log15"
+)
+
+const (
+	APPARMOR_CMD_LOAD   = "r"
+	APPARMOR_CMD_UNLOAD = "R"
+	APPARMOR_CMD_PARSE  = "Q"
+)
+
+var aaPath = shared.VarPath("security", "apparmor")
+
+const AA_PROFILE_BASE = `
+  ### Base profile
+  capability,
+  dbus,
+  file,
+  network,
+  umount,
+
+  # Hide common denials
+  deny mount options=(ro, remount) -> /,
+  deny mount options=(ro, remount, silent) -> /,
+
+  # Allow normal signal handling
+  signal (receive),
+  signal peer=@{profile_name},
+
+  # Allow normal process handling
+  ptrace (readby),
+  ptrace (tracedby),
+  ptrace peer=@{profile_name},
+
+  # Handle binfmt
+  mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/,
+  deny /proc/sys/fs/binfmt_misc/{,**} rwklx,
+
+  # Handle cgroupfs
+  mount options=(ro, nosuid, nodev, noexec, remount, strictatime) -> /sys/fs/cgroup/,
+
+  # Handle debugfs
+  mount fstype=debugfs -> /sys/kernel/debug/,
+  deny /sys/kernel/debug/{,**} rwklx,
+
+  # Handle efivarfs
+  mount fstype=efivarfs -> /sys/firmware/efi/efivars/,
+  deny /sys/firmware/efi/efivars/{,**} rwklx,
+
+  # Handle fuse
+  mount fstype=fuse,
+  mount fstype=fuse.*,
+  mount fstype=fusectl -> /sys/fs/fuse/connections/,
+
+  # Handle hugetlbfs
+  mount fstype=hugetlbfs,
+
+  # Handle mqueue
+  mount fstype=mqueue,
+
+  # Handle proc
+  mount fstype=proc -> /proc/,
+  deny /proc/bus/** wklx,
+  deny /proc/kcore rwklx,
+  deny /proc/sysrq-trigger rwklx,
+  deny /proc/acpi/** rwklx,
+  deny /proc/sys/fs/** wklx,
+
+  # Handle securityfs (access handled separately)
+  mount fstype=securityfs -> /sys/kernel/security/,
+
+  # Handle sysfs (access handled below)
+  mount fstype=sysfs -> /sys/,
+  mount options=(rw, nosuid, nodev, noexec, remount) -> /sys/,
+
+  # Handle tmpfs
+  mount fstype=tmpfs,
+
+  # Allow limited modification of mount propagation
+  mount options=(rw,slave) -> /,
+  mount options=(rw,rslave) -> /,
+  mount options=(rw,shared) -> /,
+  mount options=(rw,rshared) -> /,
+  mount options=(rw,private) -> /,
+  mount options=(rw,rprivate) -> /,
+  mount options=(rw,unbindable) -> /,
+  mount options=(rw,runbindable) -> /,
+
+  # Allow various ro-bind-*re*-mounts
+  mount options=(ro,remount,bind) /[^spd]*{,/**},
+  mount options=(ro,remount,bind) /d[^e]*{,/**},
+  mount options=(ro,remount,bind) /de[^v]*{,/**},
+  mount options=(ro,remount,bind) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind) /dev?*{,/**},
+  mount options=(ro,remount,bind) /p[^r]*{,/**},
+  mount options=(ro,remount,bind) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind) /proc?*{,/**},
+  mount options=(ro,remount,bind) /s[^y]*{,/**},
+  mount options=(ro,remount,bind) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind) /sys?*{,/**},
+
+  mount options=(ro,remount,bind,nodev) /[^spd]*{,/**},
+  mount options=(ro,remount,bind,nodev) /d[^e]*{,/**},
+  mount options=(ro,remount,bind,nodev) /de[^v]*{,/**},
+  mount options=(ro,remount,bind,nodev) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind,nodev) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind,nodev) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind,nodev) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind,nodev) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind,nodev) /dev?*{,/**},
+  mount options=(ro,remount,bind,nodev) /p[^r]*{,/**},
+  mount options=(ro,remount,bind,nodev) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind,nodev) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind,nodev) /proc?*{,/**},
+  mount options=(ro,remount,bind,nodev) /s[^y]*{,/**},
+  mount options=(ro,remount,bind,nodev) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind,nodev) /sys?*{,/**},
+
+  mount options=(ro,remount,bind,nodev,nosuid) /[^spd]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /d[^e]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /de[^v]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /dev?*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /p[^r]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /proc?*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /s[^y]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind,nodev,nosuid) /sys?*{,/**},
+
+  mount options=(ro,remount,bind,noexec) /[^spd]*{,/**},
+  mount options=(ro,remount,bind,noexec) /d[^e]*{,/**},
+  mount options=(ro,remount,bind,noexec) /de[^v]*{,/**},
+  mount options=(ro,remount,bind,noexec) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind,noexec) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind,noexec) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind,noexec) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind,noexec) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind,noexec) /dev?*{,/**},
+  mount options=(ro,remount,bind,noexec) /p[^r]*{,/**},
+  mount options=(ro,remount,bind,noexec) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind,noexec) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind,noexec) /proc?*{,/**},
+  mount options=(ro,remount,bind,noexec) /s[^y]*{,/**},
+  mount options=(ro,remount,bind,noexec) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind,noexec) /sys?*{,/**},
+
+  mount options=(ro,remount,bind,noexec,nodev) /[^spd]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /d[^e]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /de[^v]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /dev?*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /p[^r]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /proc?*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /s[^y]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind,noexec,nodev) /sys?*{,/**},
+
+  mount options=(ro,remount,bind,nosuid) /[^spd]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /d[^e]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /de[^v]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind,nosuid) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /dev?*{,/**},
+  mount options=(ro,remount,bind,nosuid) /p[^r]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /proc?*{,/**},
+  mount options=(ro,remount,bind,nosuid) /s[^y]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind,nosuid) /sys?*{,/**},
+
+  mount options=(ro,remount,bind,nosuid,nodev) /[^spd]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /d[^e]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /de[^v]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /dev?*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /p[^r]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /proc?*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /s[^y]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind,nosuid,nodev) /sys?*{,/**},
+
+  mount options=(ro,remount,bind,nosuid,noexec) /[^spd]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /d[^e]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /de[^v]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /dev?*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /p[^r]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /proc?*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /s[^y]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec) /sys?*{,/**},
+
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /[^spd]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /d[^e]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /de[^v]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev?*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /p[^r]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /proc?*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /s[^y]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,nodev) /sys?*{,/**},
+
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /[^spd]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /d[^e]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /de[^v]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev?*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /p[^r]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /proc?*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /s[^y]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /sys?*{,/**},
+
+  # Allow bind-mounts of anything except /proc, /sys and /dev/.lxc
+  mount options=(rw,bind) /[^spd]*{,/**},
+  mount options=(rw,bind) /d[^e]*{,/**},
+  mount options=(rw,bind) /de[^v]*{,/**},
+  mount options=(rw,bind) /dev/.[^l]*{,/**},
+  mount options=(rw,bind) /dev/.l[^x]*{,/**},
+  mount options=(rw,bind) /dev/.lx[^c]*{,/**},
+  mount options=(rw,bind) /dev/.lxc?*{,/**},
+  mount options=(rw,bind) /dev/[^.]*{,/**},
+  mount options=(rw,bind) /dev?*{,/**},
+  mount options=(rw,bind) /p[^r]*{,/**},
+  mount options=(rw,bind) /pr[^o]*{,/**},
+  mount options=(rw,bind) /pro[^c]*{,/**},
+  mount options=(rw,bind) /proc?*{,/**},
+  mount options=(rw,bind) /s[^y]*{,/**},
+  mount options=(rw,bind) /sy[^s]*{,/**},
+  mount options=(rw,bind) /sys?*{,/**},
+
+  # Allow rbind-mounts of anything except /, /dev, /proc and /sys
+  mount options=(rw,rbind) /[^spd]*{,/**},
+  mount options=(rw,rbind) /d[^e]*{,/**},
+  mount options=(rw,rbind) /de[^v]*{,/**},
+  mount options=(rw,rbind) /dev?*{,/**},
+  mount options=(rw,rbind) /p[^r]*{,/**},
+  mount options=(rw,rbind) /pr[^o]*{,/**},
+  mount options=(rw,rbind) /pro[^c]*{,/**},
+  mount options=(rw,rbind) /proc?*{,/**},
+  mount options=(rw,rbind) /s[^y]*{,/**},
+  mount options=(rw,rbind) /sy[^s]*{,/**},
+  mount options=(rw,rbind) /sys?*{,/**},
+
+  # Allow read-only bind-mounts of anything except /proc, /sys and /dev/.lxc
+  mount options=(ro,remount,bind) /[^spd]*{,/**},
+  mount options=(ro,remount,bind) /d[^e]*{,/**},
+  mount options=(ro,remount,bind) /de[^v]*{,/**},
+  mount options=(ro,remount,bind) /dev/.[^l]*{,/**},
+  mount options=(ro,remount,bind) /dev/.l[^x]*{,/**},
+  mount options=(ro,remount,bind) /dev/.lx[^c]*{,/**},
+  mount options=(ro,remount,bind) /dev/.lxc?*{,/**},
+  mount options=(ro,remount,bind) /dev/[^.]*{,/**},
+  mount options=(ro,remount,bind) /dev?*{,/**},
+  mount options=(ro,remount,bind) /p[^r]*{,/**},
+  mount options=(ro,remount,bind) /pr[^o]*{,/**},
+  mount options=(ro,remount,bind) /pro[^c]*{,/**},
+  mount options=(ro,remount,bind) /proc?*{,/**},
+  mount options=(ro,remount,bind) /s[^y]*{,/**},
+  mount options=(ro,remount,bind) /sy[^s]*{,/**},
+  mount options=(ro,remount,bind) /sys?*{,/**},
+
+  # Allow moving mounts except for /proc, /sys and /dev/.lxc
+  mount options=(rw,move) /[^spd]*{,/**},
+  mount options=(rw,move) /d[^e]*{,/**},
+  mount options=(rw,move) /de[^v]*{,/**},
+  mount options=(rw,move) /dev/.[^l]*{,/**},
+  mount options=(rw,move) /dev/.l[^x]*{,/**},
+  mount options=(rw,move) /dev/.lx[^c]*{,/**},
+  mount options=(rw,move) /dev/.lxc?*{,/**},
+  mount options=(rw,move) /dev/[^.]*{,/**},
+  mount options=(rw,move) /dev?*{,/**},
+  mount options=(rw,move) /p[^r]*{,/**},
+  mount options=(rw,move) /pr[^o]*{,/**},
+  mount options=(rw,move) /pro[^c]*{,/**},
+  mount options=(rw,move) /proc?*{,/**},
+  mount options=(rw,move) /s[^y]*{,/**},
+  mount options=(rw,move) /sy[^s]*{,/**},
+  mount options=(rw,move) /sys?*{,/**},
+
+  # Block dangerous paths under /proc/sys
+  deny /proc/sys/[^kn]*{,/**} wklx,
+  deny /proc/sys/k[^e]*{,/**} wklx,
+  deny /proc/sys/ke[^r]*{,/**} wklx,
+  deny /proc/sys/ker[^n]*{,/**} wklx,
+  deny /proc/sys/kern[^e]*{,/**} wklx,
+  deny /proc/sys/kerne[^l]*{,/**} wklx,
+  deny /proc/sys/kernel/[^smhd]*{,/**} wklx,
+  deny /proc/sys/kernel/d[^o]*{,/**} wklx,
+  deny /proc/sys/kernel/do[^m]*{,/**} wklx,
+  deny /proc/sys/kernel/dom[^a]*{,/**} wklx,
+  deny /proc/sys/kernel/doma[^i]*{,/**} wklx,
+  deny /proc/sys/kernel/domai[^n]*{,/**} wklx,
+  deny /proc/sys/kernel/domain[^n]*{,/**} wklx,
+  deny /proc/sys/kernel/domainn[^a]*{,/**} wklx,
+  deny /proc/sys/kernel/domainna[^m]*{,/**} wklx,
+  deny /proc/sys/kernel/domainnam[^e]*{,/**} wklx,
+  deny /proc/sys/kernel/domainname?*{,/**} wklx,
+  deny /proc/sys/kernel/h[^o]*{,/**} wklx,
+  deny /proc/sys/kernel/ho[^s]*{,/**} wklx,
+  deny /proc/sys/kernel/hos[^t]*{,/**} wklx,
+  deny /proc/sys/kernel/host[^n]*{,/**} wklx,
+  deny /proc/sys/kernel/hostn[^a]*{,/**} wklx,
+  deny /proc/sys/kernel/hostna[^m]*{,/**} wklx,
+  deny /proc/sys/kernel/hostnam[^e]*{,/**} wklx,
+  deny /proc/sys/kernel/hostname?*{,/**} wklx,
+  deny /proc/sys/kernel/m[^s]*{,/**} wklx,
+  deny /proc/sys/kernel/ms[^g]*{,/**} wklx,
+  deny /proc/sys/kernel/msg*/** wklx,
+  deny /proc/sys/kernel/s[^he]*{,/**} wklx,
+  deny /proc/sys/kernel/se[^m]*{,/**} wklx,
+  deny /proc/sys/kernel/sem*/** wklx,
+  deny /proc/sys/kernel/sh[^m]*{,/**} wklx,
+  deny /proc/sys/kernel/shm*/** wklx,
+  deny /proc/sys/kernel?*{,/**} wklx,
+  deny /proc/sys/n[^e]*{,/**} wklx,
+  deny /proc/sys/ne[^t]*{,/**} wklx,
+  deny /proc/sys/net?*{,/**} wklx,
+
+  # Block dangerous paths under /sys
+  deny /sys/[^fdck]*{,/**} wklx,
+  deny /sys/c[^l]*{,/**} wklx,
+  deny /sys/cl[^a]*{,/**} wklx,
+  deny /sys/cla[^s]*{,/**} wklx,
+  deny /sys/clas[^s]*{,/**} wklx,
+  deny /sys/class/[^n]*{,/**} wklx,
+  deny /sys/class/n[^e]*{,/**} wklx,
+  deny /sys/class/ne[^t]*{,/**} wklx,
+  deny /sys/class/net?*{,/**} wklx,
+  deny /sys/class?*{,/**} wklx,
+  deny /sys/d[^e]*{,/**} wklx,
+  deny /sys/de[^v]*{,/**} wklx,
+  deny /sys/dev[^i]*{,/**} wklx,
+  deny /sys/devi[^c]*{,/**} wklx,
+  deny /sys/devic[^e]*{,/**} wklx,
+  deny /sys/device[^s]*{,/**} wklx,
+  deny /sys/devices/[^v]*{,/**} wklx,
+  deny /sys/devices/v[^i]*{,/**} wklx,
+  deny /sys/devices/vi[^r]*{,/**} wklx,
+  deny /sys/devices/vir[^t]*{,/**} wklx,
+  deny /sys/devices/virt[^u]*{,/**} wklx,
+  deny /sys/devices/virtu[^a]*{,/**} wklx,
+  deny /sys/devices/virtua[^l]*{,/**} wklx,
+  deny /sys/devices/virtual/[^n]*{,/**} wklx,
+  deny /sys/devices/virtual/n[^e]*{,/**} wklx,
+  deny /sys/devices/virtual/ne[^t]*{,/**} wklx,
+  deny /sys/devices/virtual/net?*{,/**} wklx,
+  deny /sys/devices/virtual?*{,/**} wklx,
+  deny /sys/devices?*{,/**} wklx,
+  deny /sys/f[^s]*{,/**} wklx,
+  deny /sys/fs/[^c]*{,/**} wklx,
+  deny /sys/fs/c[^g]*{,/**} wklx,
+  deny /sys/fs/cg[^r]*{,/**} wklx,
+  deny /sys/fs/cgr[^o]*{,/**} wklx,
+  deny /sys/fs/cgro[^u]*{,/**} wklx,
+  deny /sys/fs/cgrou[^p]*{,/**} wklx,
+  deny /sys/fs/cgroup?*{,/**} wklx,
+  deny /sys/fs?*{,/**} wklx,
+`
+
+const AA_PROFILE_NESTING = `
+  pivot_root,
+
+  # Allow sending signals and tracing children namespaces
+  ptrace,
+  signal,
+
+  # Prevent access to hidden proc/sys mounts
+  deny /dev/.lxc/proc/** rw,
+  deny /dev/.lxc/sys/** rw,
+
+  # Allow mounting proc and sysfs in the container
+  mount fstype=proc -> /usr/lib/*/lxc/**,
+  mount fstype=sysfs -> /usr/lib/*/lxc/**,
+
+  # Allow nested LXD
+  mount none -> /var/lib/lxd/shmounts/,
+  mount /var/lib/lxd/shmounts/ -> /var/lib/lxd/shmounts/,
+  mount options=bind /var/lib/lxd/shmounts/** -> /var/lib/lxd/**,
+
+  # FIXME: There doesn't seem to be a way to ask for:
+  # mount options=(ro,nosuid,nodev,noexec,remount,bind),
+  # as we always get mount to $cdir/proc/sys with those flags denied
+  # So allow all mounts until that is straightened out:
+  mount,
+`
+
+const AA_PROFILE_UNPRIVILEGED = `
+  pivot_root,
+
+  # Allow modifying mount propagation
+  mount options=(rw,slave) -> **,
+  mount options=(rw,rslave) -> **,
+  mount options=(rw,shared) -> **,
+  mount options=(rw,rshared) -> **,
+  mount options=(rw,private) -> **,
+  mount options=(rw,rprivate) -> **,
+  mount options=(rw,unbindable) -> **,
+  mount options=(rw,runbindable) -> **,
+
+  # Allow all bind-mounts
+  mount options=(rw,bind) / -> /**,
+  mount options=(rw,bind) /** -> /**,
+  mount options=(rw,rbind) / -> /**,
+  mount options=(rw,rbind) /** -> /**,
+
+  # Allow common combinations of bind/remount
+  # NOTE: An AppArmor bug effectively turns these into wildcard mount allows
+  mount options=(ro,remount,bind),
+  mount options=(ro,remount,bind,nodev),
+  mount options=(ro,remount,bind,nodev,nosuid),
+  mount options=(ro,remount,bind,noexec),
+  mount options=(ro,remount,bind,noexec,nodev),
+  mount options=(ro,remount,bind,nosuid),
+  mount options=(ro,remount,bind,nosuid,nodev),
+  mount options=(ro,remount,bind,nosuid,noexec),
+  mount options=(ro,remount,bind,nosuid,noexec,nodev),
+  mount options=(ro,remount,bind,nosuid,noexec,strictatime),
+
+  # Allow remounting things read-only
+  mount options=(ro,remount) /,
+  mount options=(ro,remount) /**,
+`
+
+// mkApparmorName makes the given name fit within AppArmor's profile name
+// length limit, replacing over-long names with their sha256 hex digest.
+func mkApparmorName(name string) string {
+	if len(name)+7 >= 253 {
+		hash := sha256.New()
+		io.WriteString(hash, name)
+		return fmt.Sprintf("%x", hash.Sum(nil))
+	}
+
+	return name
+}
+
+// AANamespace returns the AppArmor policy namespace used for the container.
+func AANamespace(c *ContainerLXC) string {
+	/* / is not allowed in apparmor namespace names; let's also trim the
+	 * leading / so it doesn't look like "-var-lib-lxd"
+	 */
+	lxddir := strings.Replace(strings.Trim(shared.VarPath(""), "/"), "/", "-", -1)
+	lxddir = mkApparmorName(lxddir)
+	name := project.Prefix(c.Project(), c.Name())
+	return fmt.Sprintf("lxd-%s_<%s>", name, lxddir)
+}
+
+// AAProfileFull returns the fully-qualified AppArmor profile name for the container.
+func AAProfileFull(c *ContainerLXC) string {
+	lxddir := shared.VarPath("")
+	lxddir = mkApparmorName(lxddir)
+	name := project.Prefix(c.Project(), c.Name())
+	return fmt.Sprintf("lxd-%s_<%s>", name, lxddir)
+}
+
+// AAProfileShort returns the short AppArmor profile name for the container.
+func AAProfileShort(c *ContainerLXC) string {
+	name := project.Prefix(c.Project(), c.Name())
+	return fmt.Sprintf("lxd-%s", name)
+}
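+
+// Illustrative naming (assuming the default /var/lib/lxd directory and the
+// default project), for a container "c1":
+//
+//   AAProfileShort(c) -> "lxd-c1"
+//   AAProfileFull(c)  -> "lxd-c1_</var/lib/lxd>"
+//   AANamespace(c)    -> "lxd-c1_<var-lib-lxd>" (no "/" in namespace names)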
+
+// getAAProfileContent generates the AppArmor profile template for the given
+// container. This includes the stock LXC includes as well as any lines set
+// in raw.apparmor.
+func getAAProfileContent(c *ContainerLXC) string {
+	profile := strings.TrimLeft(AA_PROFILE_BASE, "\n")
+
+	// Apply new features
+	if aaParserSupports("unix") {
+		profile += `
+  ### Feature: unix
+  # Allow receive via unix sockets from anywhere
+  unix (receive),
+
+  # Allow all unix in the container
+  unix peer=(label=@{profile_name}),
+`
+	}
+
+	// Apply cgns bits
+	if shared.PathExists("/proc/self/ns/cgroup") {
+		profile += "\n  ### Feature: cgroup namespace\n"
+		profile += "  mount fstype=cgroup -> /sys/fs/cgroup/**,\n"
+		profile += "  mount fstype=cgroup2 -> /sys/fs/cgroup/**,\n"
+	}
+
+	state := c.DaemonState()
+	if state.OS.AppArmorStacking && !state.OS.AppArmorStacked {
+		profile += "\n  ### Feature: apparmor stacking\n"
+		profile += `  ### Configuration: apparmor profile loading (in namespace)
+  deny /sys/k[^e]*{,/**} wklx,
+  deny /sys/ke[^r]*{,/**} wklx,
+  deny /sys/ker[^n]*{,/**} wklx,
+  deny /sys/kern[^e]*{,/**} wklx,
+  deny /sys/kerne[^l]*{,/**} wklx,
+  deny /sys/kernel/[^s]*{,/**} wklx,
+  deny /sys/kernel/s[^e]*{,/**} wklx,
+  deny /sys/kernel/se[^c]*{,/**} wklx,
+  deny /sys/kernel/sec[^u]*{,/**} wklx,
+  deny /sys/kernel/secu[^r]*{,/**} wklx,
+  deny /sys/kernel/secur[^i]*{,/**} wklx,
+  deny /sys/kernel/securi[^t]*{,/**} wklx,
+  deny /sys/kernel/securit[^y]*{,/**} wklx,
+  deny /sys/kernel/security/[^a]*{,/**} wklx,
+  deny /sys/kernel/security/a[^p]*{,/**} wklx,
+  deny /sys/kernel/security/ap[^p]*{,/**} wklx,
+  deny /sys/kernel/security/app[^a]*{,/**} wklx,
+  deny /sys/kernel/security/appa[^r]*{,/**} wklx,
+  deny /sys/kernel/security/appar[^m]*{,/**} wklx,
+  deny /sys/kernel/security/apparm[^o]*{,/**} wklx,
+  deny /sys/kernel/security/apparmo[^r]*{,/**} wklx,
+  deny /sys/kernel/security/apparmor?*{,/**} wklx,
+  deny /sys/kernel/security?*{,/**} wklx,
+  deny /sys/kernel?*{,/**} wklx,
+`
+		profile += fmt.Sprintf("  change_profile -> \":%s:*\",\n", AANamespace(c))
+		profile += fmt.Sprintf("  change_profile -> \":%s://*\",\n", AANamespace(c))
+	} else {
+		profile += "\n  ### Feature: apparmor stacking (not present)\n"
+		profile += "  deny /sys/k*{,/**} wklx,\n"
+	}
+
+	if c.IsNesting() {
+		// Apply nesting bits
+		profile += "\n  ### Configuration: nesting\n"
+		profile += strings.TrimLeft(AA_PROFILE_NESTING, "\n")
+		if !state.OS.AppArmorStacking || state.OS.AppArmorStacked {
+			profile += fmt.Sprintf("  change_profile -> \"%s\",\n", AAProfileFull(c))
+		}
+	}
+
+	if !c.IsPrivileged() || state.OS.RunningInUserNS {
+		// Apply unprivileged bits
+		profile += "\n  ### Configuration: unprivileged containers\n"
+		profile += strings.TrimLeft(AA_PROFILE_UNPRIVILEGED, "\n")
+	}
+
+	// Append raw.apparmor
+	rawApparmor, ok := c.ExpandedConfig()["raw.apparmor"]
+	if ok {
+		profile += "\n  ### Configuration: raw.apparmor\n"
+		for _, line := range strings.Split(strings.Trim(rawApparmor, "\n"), "\n") {
+			profile += fmt.Sprintf("  %s\n", line)
+		}
+	}
+
+	return fmt.Sprintf(`#include <tunables/global>
+profile "%s" flags=(attach_disconnected,mediate_deleted) {
+%s
+}
+`, AAProfileFull(c), strings.Trim(profile, "\n"))
+}
+
+func runApparmor(command string, c *ContainerLXC) error {
+	state := c.DaemonState()
+	if !state.OS.AppArmorAvailable {
+		return nil
+	}
+
+	output, err := shared.RunCommand("apparmor_parser", []string{
+		fmt.Sprintf("-%sWL", command),
+		path.Join(aaPath, "cache"),
+		path.Join(aaPath, "profiles", AAProfileShort(c)),
+	}...)
+
+	if err != nil {
+		logger.Error("Running apparmor",
+			log.Ctx{"action": command, "output": output, "err": err})
+	}
+
+	return err
+}
+
+func getAACacheDir() string {
+	basePath := path.Join(aaPath, "cache")
+
+	major, minor, _, err := getAAParserVersion()
+	if err != nil {
+		return basePath
+	}
+
+	// Multiple policy cache directories were only added in v2.13.
+	if major < 2 || (major == 2 && minor < 13) {
+		return basePath
+	}
+
+	output, err := shared.RunCommand("apparmor_parser", "-L", basePath, "--print-cache-dir")
+	if err != nil {
+		return basePath
+	}
+
+	return strings.TrimSpace(output)
+}
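+
+// Note: from apparmor_parser 2.13 onwards the binary cache is keyed by
+// policy ABI, so "--print-cache-dir" typically returns a versioned
+// subdirectory of the base cache path rather than the base path itself.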
+
+func mkApparmorNamespace(c *ContainerLXC, namespace string) error {
+	state := c.DaemonState()
+	if !state.OS.AppArmorStacking || state.OS.AppArmorStacked {
+		return nil
+	}
+
+	p := path.Join("/sys/kernel/security/apparmor/policy/namespaces", namespace)
+	// Ignore the error if the namespace directory already exists.
+	if err := os.Mkdir(p, 0755); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	return nil
+}
+
+// AALoadProfile ensures that the container's policy is loaded into the
+// kernel so the container can boot.
+func AALoadProfile(c *ContainerLXC) error {
+	state := c.DaemonState()
+	if !state.OS.AppArmorAdmin {
+		return nil
+	}
+
+	if err := mkApparmorNamespace(c, AANamespace(c)); err != nil {
+		return err
+	}
+
+	/* In order to avoid forcing a profile parse (potentially slow) on
+	 * every container start, let's use apparmor's binary policy cache,
+	 * which checks mtime of the files to figure out if the policy needs to
+	 * be regenerated.
+	 *
+	 * Since it uses mtimes, we shouldn't just always write out our local
+	 * apparmor template; instead we should check to see whether the
+	 * template is the same as ours. If it isn't we should write our
+	 * version out so that the new changes are reflected and we definitely
+	 * force a recompile.
+	 */
+	profile := path.Join(aaPath, "profiles", AAProfileShort(c))
+	content, err := ioutil.ReadFile(profile)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	updated := getAAProfileContent(c)
+
+	if string(content) != string(updated) {
+		if err := os.MkdirAll(path.Join(aaPath, "cache"), 0700); err != nil {
+			return err
+		}
+
+		if err := os.MkdirAll(path.Join(aaPath, "profiles"), 0700); err != nil {
+			return err
+		}
+
+		if err := ioutil.WriteFile(profile, []byte(updated), 0600); err != nil {
+			return err
+		}
+	}
+
+	return runApparmor(APPARMOR_CMD_LOAD, c)
+}
+
+// AADestroy ensures that the container's policy namespace is unloaded to
+// free kernel memory. This does not delete the policy from disk or cache.
+func AADestroy(c *ContainerLXC) error {
+	state := c.DaemonState()
+	if !state.OS.AppArmorAdmin {
+		return nil
+	}
+
+	if state.OS.AppArmorStacking && !state.OS.AppArmorStacked {
+		p := path.Join("/sys/kernel/security/apparmor/policy/namespaces", AANamespace(c))
+		if err := os.Remove(p); err != nil {
+			logger.Error("Error removing apparmor namespace", log.Ctx{"err": err, "ns": p})
+		}
+	}
+
+	return runApparmor(APPARMOR_CMD_UNLOAD, c)
+}
+
+// AAParseProfile parses the profile without loading it into the kernel.
+func AAParseProfile(c *ContainerLXC) error {
+	state := c.DaemonState()
+	if !state.OS.AppArmorAvailable {
+		return nil
+	}
+
+	return runApparmor(APPARMOR_CMD_PARSE, c)
+}
+
+// AADeleteProfile deletes the policy from cache/disk.
+func AADeleteProfile(c *ContainerLXC) {
+	state := c.DaemonState()
+	if !state.OS.AppArmorAdmin {
+		return
+	}
+
+	/* It's ok if these deletes fail: if the container was never started,
+	 * we'll have never written a profile or cached it.
+	 */
+	os.Remove(path.Join(getAACacheDir(), AAProfileShort(c)))
+	os.Remove(path.Join(aaPath, "profiles", AAProfileShort(c)))
+}
+
+func aaParserSupports(feature string) bool {
+	major, minor, micro, err := getAAParserVersion()
+	if err != nil {
+		return false
+	}
+
+	switch feature {
+	case "unix":
+		if major < 2 {
+			return false
+		}
+
+		if major == 2 && minor < 10 {
+			return false
+		}
+
+		if major == 2 && minor == 10 && micro < 95 {
+			return false
+		}
+	}
+
+	return true
+}
+
+func getAAParserVersion() (major int, minor int, micro int, err error) {
+	var out string
+
+	out, err = shared.RunCommand("apparmor_parser", "--version")
+	if err != nil {
+		return
+	}
+
+	_, err = fmt.Sscanf(strings.Split(out, "\n")[0], "AppArmor parser version %d.%d.%d", &major, &minor, &micro)
+
+	return
+}

From 72ec3a098d03e8377f64810ce7677bbac9b00d7f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:07:20 +0100
Subject: [PATCH 10/72] lxd/instance/instance/backup: Adds backup types to
 instance package

So they can be used outside of the main package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/instance_backup.go | 148 ++++++++++++++++++++++++++++++++
 1 file changed, 148 insertions(+)
 create mode 100644 lxd/instance/instance_backup.go

diff --git a/lxd/instance/instance_backup.go b/lxd/instance/instance_backup.go
new file mode 100644
index 0000000000..12aaaac041
--- /dev/null
+++ b/lxd/instance/instance_backup.go
@@ -0,0 +1,148 @@
+package instance
+
+import (
+	"os"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+)
+
+// Backup represents an instance backup.
+type Backup struct {
+	state    *state.State
+	Instance Instance
+
+	// Properties.
+	id               int
+	Name             string
+	creationDate     time.Time
+	expiryDate       time.Time
+	InstanceOnly     bool
+	OptimizedStorage bool
+}
+
+// Rename renames an instance backup
+func (b *Backup) Rename(newName string) error {
+	oldBackupPath := shared.VarPath("backups", b.Name)
+	newBackupPath := shared.VarPath("backups", newName)
+
+	// Create the new backup path
+	backupsPath := shared.VarPath("backups", b.Instance.Name())
+	if !shared.PathExists(backupsPath) {
+		err := os.MkdirAll(backupsPath, 0700)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Rename the backup directory
+	err := os.Rename(oldBackupPath, newBackupPath)
+	if err != nil {
+		return err
+	}
+
+	// Check if we can remove the container directory
+	empty, _ := shared.PathIsEmpty(backupsPath)
+	if empty {
+		err := os.Remove(backupsPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Rename the database record
+	err = b.state.Cluster.ContainerBackupRename(b.Name, newName)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Delete removes an instance backup
+func (b *Backup) Delete() error {
+	return DoBackupDelete(b.state, b.Name, b.Instance.Name())
+}
+
+// Render returns the API representation of the backup.
+func (b *Backup) Render() *api.InstanceBackup {
+	return &api.InstanceBackup{
+		Name:             strings.SplitN(b.Name, "/", 2)[1],
+		CreatedAt:        b.creationDate,
+		ExpiresAt:        b.expiryDate,
+		InstanceOnly:     b.InstanceOnly,
+		ContainerOnly:    b.InstanceOnly,
+		OptimizedStorage: b.OptimizedStorage,
+	}
+}
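+
+// Note: backup names are stored as "<instance>/<backup>", so Render strips
+// the instance prefix; e.g. a backup named "c1/backup0" renders as "backup0".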
+
+// BackupInfo represents the metadata exported alongside an instance backup.
+type BackupInfo struct {
+	Project         string   `json:"project" yaml:"project"`
+	Name            string   `json:"name" yaml:"name"`
+	Backend         string   `json:"backend" yaml:"backend"`
+	Privileged      bool     `json:"privileged" yaml:"privileged"`
+	Pool            string   `json:"pool" yaml:"pool"`
+	Snapshots       []string `json:"snapshots,omitempty" yaml:"snapshots,omitempty"`
+	HasBinaryFormat bool     `json:"-" yaml:"-"`
+}
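+
+// An illustrative serialized BackupInfo (field names follow the yaml tags
+// above; the values are hypothetical):
+//
+//   project: default
+//   name: c1
+//   backend: zfs
+//   privileged: false
+//   pool: default
+//   snapshots:
+//   - snap0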
+
+// BackupLoadByName loads a backup from the database
+func BackupLoadByName(s *state.State, project, name string) (*Backup, error) {
+	// Get the backup database record
+	args, err := s.Cluster.ContainerGetBackup(project, name)
+	if err != nil {
+		return nil, errors.Wrap(err, "Load backup from database")
+	}
+
+	// Load the instance it belongs to
+	instance, err := InstanceLoadById(s, args.ContainerID)
+	if err != nil {
+		return nil, errors.Wrap(err, "Load container from database")
+	}
+
+	// Return the backup struct
+	return &Backup{
+		state:            s,
+		Instance:         instance,
+		id:               args.ID,
+		Name:             name,
+		creationDate:     args.CreationDate,
+		expiryDate:       args.ExpiryDate,
+		InstanceOnly:     args.InstanceOnly,
+		OptimizedStorage: args.OptimizedStorage,
+	}, nil
+}
+
+// DoBackupDelete removes a backup's on-disk data and its database record.
+func DoBackupDelete(s *state.State, backupName, containerName string) error {
+	backupPath := shared.VarPath("backups", backupName)
+
+	// Delete the on-disk data
+	if shared.PathExists(backupPath) {
+		err := os.RemoveAll(backupPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Check if we can remove the container directory
+	backupsPath := shared.VarPath("backups", containerName)
+	empty, _ := shared.PathIsEmpty(backupsPath)
+	if empty {
+		err := os.Remove(backupsPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Remove the database record
+	err := s.Cluster.ContainerBackupRemove(backupName)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}

From 21d2b5c14fafc4271f2be44dbdd3632c96ed65ba Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:07:56 +0100
Subject: [PATCH 11/72] lxd/instance/instance/container: Adds some container
 util functions to instance package

So they can be used outside of the main package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/instance_container.go | 175 +++++++++++++++++++++++++++++
 1 file changed, 175 insertions(+)
 create mode 100644 lxd/instance/instance_container.go

diff --git a/lxd/instance/instance_container.go b/lxd/instance/instance_container.go
new file mode 100644
index 0000000000..204b0ac0ff
--- /dev/null
+++ b/lxd/instance/instance_container.go
@@ -0,0 +1,175 @@
+package instance
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/pkg/errors"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/device"
+	"github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/lxd/sys"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/idmap"
+	"github.com/lxc/lxd/shared/osarch"
+)
+
+// ContainerValidConfig validates an instance or profile config map.
+func ContainerValidConfig(sysOS *sys.OS, config map[string]string, profile bool, expanded bool) error {
+	if config == nil {
+		return nil
+	}
+
+	for k, v := range config {
+		if profile && strings.HasPrefix(k, "volatile.") {
+			return fmt.Errorf("Volatile keys can only be set on containers")
+		}
+
+		if profile && strings.HasPrefix(k, "image.") {
+			return fmt.Errorf("Image keys can only be set on containers")
+		}
+
+		err := containerValidConfigKey(sysOS, k, v)
+		if err != nil {
+			return err
+		}
+	}
+
+	_, rawSeccomp := config["raw.seccomp"]
+	_, whitelist := config["security.syscalls.whitelist"]
+	_, blacklist := config["security.syscalls.blacklist"]
+	blacklistDefault := shared.IsTrue(config["security.syscalls.blacklist_default"])
+	blacklistCompat := shared.IsTrue(config["security.syscalls.blacklist_compat"])
+
+	if rawSeccomp && (whitelist || blacklist || blacklistDefault || blacklistCompat) {
+		return fmt.Errorf("raw.seccomp is mutually exclusive with security.syscalls*")
+	}
+
+	if whitelist && (blacklist || blacklistDefault || blacklistCompat) {
+		return fmt.Errorf("security.syscalls.whitelist is mutually exclusive with security.syscalls.blacklist*")
+	}
+
+	if expanded && (config["security.privileged"] == "" || !shared.IsTrue(config["security.privileged"])) && sysOS.IdmapSet == nil {
+		return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported")
+	}
+
+	unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
+	if shared.IsTrue(unprivOnly) {
+		if config["raw.idmap"] != "" {
+			err := allowedUnprivilegedOnlyMap(config["raw.idmap"])
+			if err != nil {
+				return err
+			}
+		}
+
+		if shared.IsTrue(config["security.privileged"]) {
+			return fmt.Errorf("LXD was configured to only allow unprivileged containers")
+		}
+	}
+
+	return nil
+}
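+
+// For example, ContainerValidConfig rejects a profile carrying a "volatile."
+// or "image." key, and in expanded mode it rejects an unprivileged config on
+// a host without a uid/gid allocation.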
+
+func containerValidConfigKey(os *sys.OS, key string, value string) error {
+	f, err := shared.ConfigKeyChecker(key)
+	if err != nil {
+		return err
+	}
+	if err = f(value); err != nil {
+		return err
+	}
+	if key == "raw.lxc" {
+		return lxcValidConfig(value)
+	}
+	if key == "security.syscalls.blacklist_compat" {
+		for _, arch := range os.Architectures {
+			if arch == osarch.ARCH_64BIT_INTEL_X86 ||
+				arch == osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN ||
+				arch == osarch.ARCH_64BIT_POWERPC_BIG_ENDIAN {
+				return nil
+			}
+		}
+		return fmt.Errorf("security.syscalls.blacklist_compat isn't supported on this architecture")
+	}
+	return nil
+}
+
+func allowedUnprivilegedOnlyMap(rawIdmap string) error {
+	rawMaps, err := parseRawIdmap(rawIdmap)
+	if err != nil {
+		return err
+	}
+
+	for _, ent := range rawMaps {
+		if ent.Hostid == 0 {
+			return fmt.Errorf("Cannot map root user into container as LXD was configured to only allow unprivileged containers")
+		}
+	}
+
+	return nil
+}
+
+// ContainerValidDevices validates container device configs.
+func ContainerValidDevices(state *state.State, cluster *db.Cluster, instanceName string, devices config.Devices, expanded bool) error {
+	// Empty device list
+	if devices == nil {
+		return nil
+	}
+
+	// Create a temporary ContainerLXC struct to use as an Instance in device validation.
+	// Populate its name, localDevices and expandedDevices properties based on the mode of
+	// validation being performed. In non-expanded validation, expensive checks should be avoided.
+	instance := &ContainerLXC{
+		name:         instanceName,
+		localDevices: devices.Clone(), // Prevent devices from modifying their config.
+	}
+
+	if expanded {
+		instance.expandedDevices = instance.localDevices // Avoid another clone.
+	}
+
+	// Check each device individually using the device package.
+	for name, config := range devices {
+		_, err := device.New(instance, state, name, config, nil, nil)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Check we have a root disk if in expanded validation mode.
+	if expanded {
+		_, _, err := shared.GetRootDiskDevice(devices.CloneNative())
+		if err != nil {
+			return errors.Wrap(err, "Detect root disk device")
+		}
+	}
+
+	return nil
+}
+
+// IDMapsetFromString parses a JSON-encoded idmap into an IdmapSet.
+func IDMapsetFromString(idmapString string) (*idmap.IdmapSet, error) {
+	lastIdmap := new(idmap.IdmapSet)
+	err := json.Unmarshal([]byte(idmapString), &lastIdmap.Idmap)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(lastIdmap.Idmap) == 0 {
+		return nil, nil
+	}
+
+	return lastIdmap, nil
+}
+
+// IDMapsetToJSON serializes an IdmapSet's entries to JSON.
+func IDMapsetToJSON(idmapSet *idmap.IdmapSet) (string, error) {
+	idmapBytes, err := json.Marshal(idmapSet.Idmap)
+	if err != nil {
+		return "", err
+	}
+
+	return string(idmapBytes), nil
+}
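+
+// Illustrative round-trip between the two helpers (the JSON layout follows
+// idmap.IdmapEntry's exported fields):
+//
+//   s, _ := IDMapsetToJSON(set)      // e.g. `[{"Isuid":true,"Isgid":false,...}]`
+//   set2, _ := IDMapsetFromString(s) // yields an equivalent IdmapSet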

From eda5fa3d4304764dec5ef39e4926fe54d618425d Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:08:45 +0100
Subject: [PATCH 12/72] lxd/instance/instance/devices: Adds device register
 function to instance package

So it can be used outside of the main package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/instance_devices.go | 49 ++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)
 create mode 100644 lxd/instance/instance_devices.go

diff --git a/lxd/instance/instance_devices.go b/lxd/instance/instance_devices.go
new file mode 100644
index 0000000000..1a1027c668
--- /dev/null
+++ b/lxd/instance/instance_devices.go
@@ -0,0 +1,49 @@
+package instance
+
+import (
+	"github.com/lxc/lxd/lxd/device"
+	"github.com/lxc/lxd/lxd/state"
+	log "github.com/lxc/lxd/shared/log15"
+	"github.com/lxc/lxd/shared/logger"
+)
+
+// DevicesRegister calls the Register() function on all supported devices so they receive events.
+func DevicesRegister(s *state.State) {
+	instances, err := InstanceLoadNodeAll(s)
+	if err != nil {
+		logger.Error("Problem loading containers list", log.Ctx{"err": err})
+		return
+	}
+
+	for _, instanceIf := range instances {
+		c, ok := instanceIf.(*ContainerLXC)
+		if !ok {
+			logger.Errorf("Instance is not container type")
+			continue
+		}
+
+		if !c.IsRunning() {
+			continue
+		}
+
+		devices := c.ExpandedDevices()
+		for _, dev := range devices.Sorted() {
+			d, _, err := c.deviceLoad(dev.Name, dev.Config)
+			if err == device.ErrUnsupportedDevType {
+				continue
+			}
+
+			if err != nil {
+				logger.Error("Failed to load device to register", log.Ctx{"err": err, "container": c.Name(), "device": dev.Name})
+				continue
+			}
+
+			// Check whether device wants to register for any events.
+			err = d.Register()
+			if err != nil {
+				logger.Error("Failed to register device", log.Ctx{"err": err, "container": c.Name(), "device": dev.Name})
+				continue
+			}
+		}
+	}
+}

From 0dde3495bff8307e7e5f46907e3558838a492bda Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:09:25 +0100
Subject: [PATCH 13/72] lxd/instance/container/lxc: Moves containerLXC to
 instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/container_lxc.go | 7038 +++++++++++++++++++++++++++++++++
 1 file changed, 7038 insertions(+)
 create mode 100644 lxd/instance/container_lxc.go

diff --git a/lxd/instance/container_lxc.go b/lxd/instance/container_lxc.go
new file mode 100644
index 0000000000..7194b0cc10
--- /dev/null
+++ b/lxd/instance/container_lxc.go
@@ -0,0 +1,7038 @@
+package instance
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/flosch/pongo2"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+	lxc "gopkg.in/lxc/go-lxc.v2"
+	yaml "gopkg.in/yaml.v2"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/lxd/device"
+	"github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/maas"
+	"github.com/lxc/lxd/lxd/operation"
+	"github.com/lxc/lxd/lxd/project"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/lxd/storage"
+	driver "github.com/lxc/lxd/lxd/storage"
+	"github.com/lxc/lxd/lxd/template"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/containerwriter"
+	"github.com/lxc/lxd/shared/idmap"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/lxc/lxd/shared/netutils"
+	"github.com/lxc/lxd/shared/osarch"
+	"github.com/lxc/lxd/shared/units"
+
+	log "github.com/lxc/lxd/shared/log15"
+)
+
+// Operation locking
+type lxcContainerOperation struct {
+	action    string
+	chanDone  chan error
+	chanReset chan bool
+	err       error
+	id        int
+	reusable  bool
+}
+
+func (op *lxcContainerOperation) Create(id int, action string, reusable bool) *lxcContainerOperation {
+	op.id = id
+	op.action = action
+	op.reusable = reusable
+	op.chanDone = make(chan error)
+	op.chanReset = make(chan bool)
+
+	go func(op *lxcContainerOperation) {
+		for {
+			select {
+			case <-op.chanReset:
+				continue
+			case <-time.After(time.Second * 30):
+				op.Done(fmt.Errorf("Container %s operation timed out after 30 seconds", op.action))
+				return
+			}
+		}
+	}(op)
+
+	return op
+}
+
+func (op *lxcContainerOperation) Reset() error {
+	if !op.reusable {
+		return fmt.Errorf("Can't reset a non-reusable operation")
+	}
+
+	op.chanReset <- true
+	return nil
+}
+
+func (op *lxcContainerOperation) Wait() error {
+	<-op.chanDone
+
+	return op.err
+}
+
+func (op *lxcContainerOperation) Done(err error) {
+	lxcContainerOperationsLock.Lock()
+	defer lxcContainerOperationsLock.Unlock()
+
+	// Check if already done
+	runningOp, ok := lxcContainerOperations[op.id]
+	if !ok || runningOp != op {
+		return
+	}
+
+	op.err = err
+	close(op.chanDone)
+
+	delete(lxcContainerOperations, op.id)
+}
+
+var lxcContainerOperationsLock sync.Mutex
+var lxcContainerOperations map[int]*lxcContainerOperation = make(map[int]*lxcContainerOperation)
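+
+// Illustrative flow (using the methods above): a caller registers an
+// operation, performs the action, then marks it done. Concurrent callers
+// block in Wait() until Done() fires or the 30 second timer trips:
+//
+//   op, err := c.createOperation("start", false, false)
+//   // ... perform the start ...
+//   op.Done(startErr) // unblocks Wait() and removes the map entry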
+
+// Helper functions
+func lxcSetConfigItem(c *lxc.Container, key string, value string) error {
+	if c == nil {
+		return fmt.Errorf("Uninitialized go-lxc struct")
+	}
+
+	if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
+		switch key {
+		case "lxc.uts.name":
+			key = "lxc.utsname"
+		case "lxc.pty.max":
+			key = "lxc.pts"
+		case "lxc.tty.dir":
+			key = "lxc.devttydir"
+		case "lxc.tty.max":
+			key = "lxc.tty"
+		case "lxc.apparmor.profile":
+			key = "lxc.aa_profile"
+		case "lxc.apparmor.allow_incomplete":
+			key = "lxc.aa_allow_incomplete"
+		case "lxc.selinux.context":
+			key = "lxc.se_context"
+		case "lxc.mount.fstab":
+			key = "lxc.mount"
+		case "lxc.console.path":
+			key = "lxc.console"
+		case "lxc.seccomp.profile":
+			key = "lxc.seccomp"
+		case "lxc.signal.halt":
+			key = "lxc.haltsignal"
+		case "lxc.signal.reboot":
+			key = "lxc.rebootsignal"
+		case "lxc.signal.stop":
+			key = "lxc.stopsignal"
+		case "lxc.log.syslog":
+			key = "lxc.syslog"
+		case "lxc.log.level":
+			key = "lxc.loglevel"
+		case "lxc.log.file":
+			key = "lxc.logfile"
+		case "lxc.init.cmd":
+			key = "lxc.init_cmd"
+		case "lxc.init.uid":
+			key = "lxc.init_uid"
+		case "lxc.init.gid":
+			key = "lxc.init_gid"
+		case "lxc.idmap":
+			key = "lxc.id_map"
+		}
+	}
+
+	if strings.HasPrefix(key, "lxc.prlimit.") {
+		if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
+			return fmt.Errorf(`Process limits require liblxc >= 2.1`)
+		}
+	}
+
+	err := c.SetConfigItem(key, value)
+	if err != nil {
+		return fmt.Errorf("Failed to set LXC config: %s=%s", key, value)
+	}
+
+	return nil
+}
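+
+// For example, on liblxc < 2.1 a caller setting "lxc.uts.name" transparently
+// writes the legacy "lxc.utsname" key instead.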
+
+func lxcParseRawLXC(line string) (string, string, error) {
+	// Ignore empty lines
+	if len(line) == 0 {
+		return "", "", nil
+	}
+
+	// Skip whitespace {"\t", " "}
+	line = strings.TrimLeft(line, "\t ")
+
+	// Ignore comments
+	if strings.HasPrefix(line, "#") {
+		return "", "", nil
+	}
+
+	// Ensure the format is valid
+	membs := strings.SplitN(line, "=", 2)
+	if len(membs) != 2 {
+		return "", "", fmt.Errorf("Invalid raw.lxc line: %s", line)
+	}
+
+	key := strings.ToLower(strings.Trim(membs[0], " \t"))
+	val := strings.Trim(membs[1], " \t")
+	return key, val, nil
+}
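+
+// For example, the raw.lxc line "lxc.init.cmd = /sbin/init" parses to key
+// "lxc.init.cmd" and value "/sbin/init"; blank lines and "#" comments return
+// an empty key so callers can skip them.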
+
+func lxcSupportSeccompNotify(state *state.State) bool {
+	if !state.OS.SeccompListener {
+		return false
+	}
+
+	if !state.OS.LXCFeatures["seccomp_notify"] {
+		return false
+	}
+
+	c, err := lxc.NewContainer("test-seccomp", state.OS.LxcPath)
+	if err != nil {
+		return false
+	}
+
+	err = c.SetConfigItem("lxc.seccomp.notify.proxy", fmt.Sprintf("unix:%s", shared.VarPath("seccomp.socket")))
+	if err != nil {
+		return false
+	}
+
+	c.Release()
+	return true
+}
+
+func lxcValidConfig(rawLxc string) error {
+	for _, line := range strings.Split(rawLxc, "\n") {
+		key, _, err := lxcParseRawLXC(line)
+		if err != nil {
+			return err
+		}
+
+		if key == "" {
+			continue
+		}
+
+		unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
+		if shared.IsTrue(unprivOnly) {
+			if key == "lxc.idmap" || key == "lxc.id_map" || key == "lxc.include" {
+				return fmt.Errorf("%s can't be set in raw.lxc as LXD was configured to only allow unprivileged containers", key)
+			}
+		}
+
+		// Blacklist some keys
+		if key == "lxc.logfile" || key == "lxc.log.file" {
+			return fmt.Errorf("Setting lxc.logfile is not allowed")
+		}
+
+		if key == "lxc.syslog" || key == "lxc.log.syslog" {
+			return fmt.Errorf("Setting lxc.log.syslog is not allowed")
+		}
+
+		if key == "lxc.ephemeral" {
+			return fmt.Errorf("Setting lxc.ephemeral is not allowed")
+		}
+
+		if strings.HasPrefix(key, "lxc.prlimit.") {
+			return fmt.Errorf(`Process limits should be set via ` +
+				`"limits.kernel.[limit name]" and not ` +
+				`directly via "lxc.prlimit.[limit name]"`)
+		}
+
+		networkKeyPrefix := "lxc.net."
+		if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
+			networkKeyPrefix = "lxc.network."
+		}
+
+		if strings.HasPrefix(key, networkKeyPrefix) {
+			fields := strings.Split(key, ".")
+
+			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
+				// lxc.network.X.ipv4 or lxc.network.X.ipv6
+				if len(fields) == 4 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) {
+					continue
+				}
+
+				// lxc.network.X.ipv4.gateway or lxc.network.X.ipv6.gateway
+				if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "gateway" {
+					continue
+				}
+			} else {
+				// lxc.net.X.ipv4.address or lxc.net.X.ipv6.address
+				if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "address" {
+					continue
+				}
+
+				// lxc.net.X.ipv4.gateway or lxc.net.X.ipv6.gateway
+				if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "gateway" {
+					continue
+				}
+			}
+
+			return fmt.Errorf("Only interface-specific ipv4/ipv6 %s keys are allowed", networkKeyPrefix)
+		}
+	}
+
+	return nil
+}
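+
+// For example, with liblxc >= 2.1 the raw.lxc line
+// "lxc.net.0.ipv4.address = 10.0.0.2/24" is accepted, while
+// "lxc.net.0.type = veth" is rejected: only the interface-specific
+// ipv4/ipv6 address and gateway keys are allowed.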
+
+func lxcStatusCode(state lxc.State) api.StatusCode {
+	return map[int]api.StatusCode{
+		1: api.Stopped,
+		2: api.Starting,
+		3: api.Running,
+		4: api.Stopping,
+		5: api.Aborting,
+		6: api.Freezing,
+		7: api.Frozen,
+		8: api.Thawed,
+		9: api.Error,
+	}[int(state)]
+}
+
+// Loader functions
+// ContainerLXCCreate creates a new container, initializing its storage and devices.
+func ContainerLXCCreate(s *state.State, args db.ContainerArgs) (*ContainerLXC, error) {
+	// Create the container struct
+	c := &ContainerLXC{
+		state:        s,
+		id:           args.ID,
+		project:      args.Project,
+		name:         args.Name,
+		node:         args.Node,
+		description:  args.Description,
+		ephemeral:    args.Ephemeral,
+		architecture: args.Architecture,
+		dbType:       args.Type,
+		snapshot:     args.Snapshot,
+		stateful:     args.Stateful,
+		creationDate: args.CreationDate,
+		lastUsedDate: args.LastUsedDate,
+		profiles:     args.Profiles,
+		localConfig:  args.Config,
+		localDevices: args.Devices,
+		expiryDate:   args.ExpiryDate,
+	}
+
+	// Cleanup the zero values
+	if c.expiryDate.IsZero() {
+		c.expiryDate = time.Time{}
+	}
+
+	if c.creationDate.IsZero() {
+		c.creationDate = time.Time{}
+	}
+
+	if c.lastUsedDate.IsZero() {
+		c.lastUsedDate = time.Time{}
+	}
+
+	ctxMap := log.Ctx{
+		"project":   args.Project,
+		"name":      c.name,
+		"ephemeral": c.ephemeral,
+	}
+
+	logger.Info("Creating container", ctxMap)
+
+	// Load the config
+	err := c.init()
+	if err != nil {
+		c.Delete()
+		logger.Error("Failed creating container", ctxMap)
+		return nil, err
+	}
+
+	// Validate expanded config
+	err = ContainerValidConfig(s.OS, c.expandedConfig, false, true)
+	if err != nil {
+		c.Delete()
+		logger.Error("Failed creating container", ctxMap)
+		return nil, err
+	}
+
+	err = ContainerValidDevices(s, s.Cluster, c.Name(), c.expandedDevices, true)
+	if err != nil {
+		c.Delete()
+		logger.Error("Failed creating container", ctxMap)
+		return nil, errors.Wrap(err, "Invalid devices")
+	}
+
+	// Retrieve the container's storage pool
+	_, rootDiskDevice, err := shared.GetRootDiskDevice(c.expandedDevices.CloneNative())
+	if err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	if rootDiskDevice["pool"] == "" {
+		c.Delete()
+		return nil, fmt.Errorf("The container's root device is missing the pool property")
+	}
+
+	storagePool := rootDiskDevice["pool"]
+
+	// Get the storage pool ID for the container
+	poolID, pool, err := s.Cluster.StoragePoolGet(storagePool)
+	if err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	// Fill in any default volume config
+	volumeConfig := map[string]string{}
+	err = StorageVolumeFillDefault(storagePool, volumeConfig, pool)
+	if err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	// Create a new database entry for the container's storage volume
+	_, err = s.Cluster.StoragePoolVolumeCreate(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, false, poolID, volumeConfig)
+	if err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	// Initialize the container storage
+	cStorage, err := StoragePoolVolumeContainerCreateInit(s, args.Project, storagePool, args.Name)
+	if err != nil {
+		c.Delete()
+		s.Cluster.StoragePoolVolumeDelete(args.Project, args.Name, db.StoragePoolVolumeTypeContainer, poolID)
+		logger.Error("Failed to initialize container storage", ctxMap)
+		return nil, err
+	}
+	c.storage = cStorage
+
+	// Setup initial idmap config
+	var idmap *idmap.IdmapSet
+	base := int64(0)
+	if !c.IsPrivileged() {
+		idmap, base, err = findIdmap(
+			s,
+			args.Name,
+			c.expandedConfig["security.idmap.isolated"],
+			c.expandedConfig["security.idmap.base"],
+			c.expandedConfig["security.idmap.size"],
+			c.expandedConfig["raw.idmap"],
+		)
+
+		if err != nil {
+			c.Delete()
+			logger.Error("Failed creating container", ctxMap)
+			return nil, err
+		}
+	}
+
+	var jsonIdmap string
+	if idmap != nil {
+		idmapBytes, err := json.Marshal(idmap.Idmap)
+		if err != nil {
+			c.Delete()
+			logger.Error("Failed creating container", ctxMap)
+			return nil, err
+		}
+		jsonIdmap = string(idmapBytes)
+	} else {
+		jsonIdmap = "[]"
+	}
+
+	err = c.VolatileSet(map[string]string{"volatile.idmap.next": jsonIdmap})
+	if err != nil {
+		c.Delete()
+		logger.Error("Failed creating container", ctxMap)
+		return nil, err
+	}
+
+	err = c.VolatileSet(map[string]string{"volatile.idmap.base": fmt.Sprintf("%v", base)})
+	if err != nil {
+		c.Delete()
+		logger.Error("Failed creating container", ctxMap)
+		return nil, err
+	}
+
+	// Invalidate the idmap cache
+	c.idmapset = nil
+
+	// Set last_state if not currently set
+	if c.localConfig["volatile.last_state.idmap"] == "" {
+		err = c.VolatileSet(map[string]string{"volatile.last_state.idmap": "[]"})
+		if err != nil {
+			c.Delete()
+			logger.Error("Failed creating container", ctxMap)
+			return nil, err
+		}
+	}
+
+	// Re-run init to update the idmap
+	err = c.init()
+	if err != nil {
+		c.Delete()
+		logger.Error("Failed creating container", ctxMap)
+		return nil, err
+	}
+
+	if !c.IsSnapshot() {
+		// Update MAAS
+		err = c.maasUpdate(nil)
+		if err != nil {
+			c.Delete()
+			logger.Error("Failed creating container", ctxMap)
+			return nil, err
+		}
+
+		// Add devices to container.
+		for k, m := range c.expandedDevices {
+			err = c.deviceAdd(k, m)
+			if err != nil && err != device.ErrUnsupportedDevType {
+				c.Delete()
+				return nil, errors.Wrapf(err, "Failed to add device '%s'", k)
+			}
+		}
+	}
+
+	logger.Info("Created container", ctxMap)
+	EventSendLifecycle(c.project, "container-created",
+		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+
+	return c, nil
+}
+
+// ContainerLXCLoad creates the container struct and expands its config and devices from the given profiles.
+func ContainerLXCLoad(s *state.State, args db.ContainerArgs, profiles []api.Profile) (*ContainerLXC, error) {
+	// Create the container struct
+	c := ContainerLXCInstantiate(s, args)
+
+	// Setup finalizer
+	runtime.SetFinalizer(c, ContainerLXCUnload)
+
+	// Expand config and devices
+	err := c.ExpandConfig(profiles)
+	if err != nil {
+		return nil, err
+	}
+
+	err = c.ExpandDevices(profiles)
+	if err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+// ContainerLXCUnload is called by the garbage collector to release the go-lxc struct.
+func ContainerLXCUnload(c *ContainerLXC) {
+	runtime.SetFinalizer(c, nil)
+	if c.c != nil {
+		c.c.Release()
+		c.c = nil
+	}
+}
+
+// ContainerLXCInstantiateEmpty creates an empty container struct without initializing it,
+// storing only the name and project for use with backups.
+func ContainerLXCInstantiateEmpty(name string, project string) *ContainerLXC {
+	c := &ContainerLXC{
+		project: project,
+		name:    name,
+	}
+
+	return c
+}
+
+// ContainerLXCInstantiate creates a container struct without initializing it.
+func ContainerLXCInstantiate(s *state.State, args db.ContainerArgs) *ContainerLXC {
+	c := &ContainerLXC{
+		state:        s,
+		id:           args.ID,
+		project:      args.Project,
+		name:         args.Name,
+		description:  args.Description,
+		ephemeral:    args.Ephemeral,
+		architecture: args.Architecture,
+		dbType:       args.Type,
+		snapshot:     args.Snapshot,
+		creationDate: args.CreationDate,
+		lastUsedDate: args.LastUsedDate,
+		profiles:     args.Profiles,
+		localConfig:  args.Config,
+		localDevices: args.Devices,
+		stateful:     args.Stateful,
+		node:         args.Node,
+		expiryDate:   args.ExpiryDate,
+	}
+
+	// Cleanup the zero values
+	if c.expiryDate.IsZero() {
+		c.expiryDate = time.Time{}
+	}
+
+	if c.creationDate.IsZero() {
+		c.creationDate = time.Time{}
+	}
+
+	if c.lastUsedDate.IsZero() {
+		c.lastUsedDate = time.Time{}
+	}
+
+	return c
+}
+
+// ContainerLXC is the LXC container driver.
+type ContainerLXC struct {
+	// Properties
+	architecture int
+	dbType       instancetype.Type
+	snapshot     bool
+	creationDate time.Time
+	lastUsedDate time.Time
+	ephemeral    bool
+	id           int
+	project      string
+	name         string
+	description  string
+	stateful     bool
+
+	// Config
+	expandedConfig  map[string]string
+	expandedDevices config.Devices
+	fromHook        bool
+	localConfig     map[string]string
+	localDevices    config.Devices
+	profiles        []string
+
+	// Cache
+	c       *lxc.Container
+	cConfig bool
+
+	state    *state.State
+	idmapset *idmap.IdmapSet
+
+	// Storage
+	storage Storage
+
+	// Clustering
+	node string
+
+	// Progress tracking
+	op *operation.Operation
+
+	expiryDate time.Time
+}
+
+func (c *ContainerLXC) Type() instancetype.Type {
+	return c.dbType
+}
+
+func (c *ContainerLXC) createOperation(action string, reusable bool, reuse bool) (*lxcContainerOperation, error) {
+	op, _ := c.getOperation("")
+	if op != nil {
+		if reuse && op.reusable {
+			op.Reset()
+			return op, nil
+		}
+
+		return nil, fmt.Errorf("Container is busy running a %s operation", op.action)
+	}
+
+	lxcContainerOperationsLock.Lock()
+	defer lxcContainerOperationsLock.Unlock()
+
+	op = &lxcContainerOperation{}
+	op.Create(c.id, action, reusable)
+	lxcContainerOperations[c.id] = op
+
+	return lxcContainerOperations[c.id], nil
+}
+
+func (c *ContainerLXC) getOperation(action string) (*lxcContainerOperation, error) {
+	lxcContainerOperationsLock.Lock()
+	defer lxcContainerOperationsLock.Unlock()
+
+	op := lxcContainerOperations[c.id]
+
+	if op == nil {
+		return nil, fmt.Errorf("No running %s container operation", action)
+	}
+
+	if action != "" && op.action != action {
+		return nil, fmt.Errorf("Container is running a %s operation, not a %s operation", op.action, action)
+	}
+
+	return op, nil
+}
+
+func (c *ContainerLXC) waitOperation() error {
+	op, _ := c.getOperation("")
+	if op != nil {
+		err := op.Wait()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func idmapSize(state *state.State, isolatedStr string, size string) (int64, error) {
+	isolated := false
+	if shared.IsTrue(isolatedStr) {
+		isolated = true
+	}
+
+	var idMapSize int64
+	if size == "" || size == "auto" {
+		if isolated {
+			idMapSize = 65536
+		} else {
+			if len(state.OS.IdmapSet.Idmap) != 2 {
+				return 0, fmt.Errorf("bad initial idmap: %v", state.OS.IdmapSet)
+			}
+
+			idMapSize = state.OS.IdmapSet.Idmap[0].Maprange
+		}
+	} else {
+		size, err := strconv.ParseInt(size, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+
+		idMapSize = size
+	}
+
+	return idMapSize, nil
+}
+
+var idmapLock sync.Mutex
+
+func parseRawIdmap(value string) ([]idmap.IdmapEntry, error) {
+	getRange := func(r string) (int64, int64, error) {
+		entries := strings.Split(r, "-")
+		if len(entries) > 2 {
+			return -1, -1, fmt.Errorf("invalid raw.idmap range %s", r)
+		}
+
+		base, err := strconv.ParseInt(entries[0], 10, 64)
+		if err != nil {
+			return -1, -1, err
+		}
+
+		size := int64(1)
+		if len(entries) > 1 {
+			size, err = strconv.ParseInt(entries[1], 10, 64)
+			if err != nil {
+				return -1, -1, err
+			}
+
+			size -= base
+			size += 1
+		}
+
+		return base, size, nil
+	}
+
+	ret := idmap.IdmapSet{}
+
+	for _, line := range strings.Split(value, "\n") {
+		if line == "" {
+			continue
+		}
+
+		entries := strings.Split(line, " ")
+		if len(entries) != 3 {
+			return nil, fmt.Errorf("invalid raw.idmap line %s", line)
+		}
+
+		outsideBase, outsideSize, err := getRange(entries[1])
+		if err != nil {
+			return nil, err
+		}
+
+		insideBase, insideSize, err := getRange(entries[2])
+		if err != nil {
+			return nil, err
+		}
+
+		if insideSize != outsideSize {
+			return nil, fmt.Errorf("idmap ranges of different sizes %s", line)
+		}
+
+		entry := idmap.IdmapEntry{
+			Hostid:   outsideBase,
+			Nsid:     insideBase,
+			Maprange: insideSize,
+		}
+
+		switch entries[0] {
+		case "both":
+			entry.Isuid = true
+			entry.Isgid = true
+			err := ret.AddSafe(entry)
+			if err != nil {
+				return nil, err
+			}
+		case "uid":
+			entry.Isuid = true
+			err := ret.AddSafe(entry)
+			if err != nil {
+				return nil, err
+			}
+		case "gid":
+			entry.Isgid = true
+			err := ret.AddSafe(entry)
+			if err != nil {
+				return nil, err
+			}
+		default:
+			return nil, fmt.Errorf("invalid raw.idmap type %s", line)
+		}
+	}
+
+	return ret.Idmap, nil
+}
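+
+// For example, the raw.idmap line "both 1000 0" maps host uid/gid 1000 to
+// container uid/gid 0, while "uid 100000-100999 0-999" maps a 1000-id range;
+// lines whose host and container ranges differ in size are rejected.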
+
+func findIdmap(state *state.State, cName string, isolatedStr string, configBase string, configSize string, rawIdmap string) (*idmap.IdmapSet, int64, error) {
+	isolated := false
+	if shared.IsTrue(isolatedStr) {
+		isolated = true
+	}
+
+	rawMaps, err := parseRawIdmap(rawIdmap)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if !isolated {
+		newIdmapset := idmap.IdmapSet{Idmap: make([]idmap.IdmapEntry, len(state.OS.IdmapSet.Idmap))}
+		copy(newIdmapset.Idmap, state.OS.IdmapSet.Idmap)
+
+		for _, ent := range rawMaps {
+			err := newIdmapset.AddSafe(ent)
+			if err != nil && err == idmap.ErrHostIdIsSubId {
+				return nil, 0, err
+			}
+		}
+
+		return &newIdmapset, 0, nil
+	}
+
+	size, err := idmapSize(state, isolatedStr, configSize)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	mkIdmap := func(offset int64, size int64) (*idmap.IdmapSet, error) {
+		set := &idmap.IdmapSet{Idmap: []idmap.IdmapEntry{
+			{Isuid: true, Nsid: 0, Hostid: offset, Maprange: size},
+			{Isgid: true, Nsid: 0, Hostid: offset, Maprange: size},
+		}}
+
+		for _, ent := range rawMaps {
+			err := set.AddSafe(ent)
+			if err != nil && err == idmap.ErrHostIdIsSubId {
+				return nil, err
+			}
+		}
+
+		return set, nil
+	}
+
+	if configBase != "" {
+		offset, err := strconv.ParseInt(configBase, 10, 64)
+		if err != nil {
+			return nil, 0, err
+		}
+
+		set, err := mkIdmap(offset, size)
+		if err != nil && err == idmap.ErrHostIdIsSubId {
+			return nil, 0, err
+		}
+
+		return set, offset, nil
+	}
+
+	idmapLock.Lock()
+	defer idmapLock.Unlock()
+
+	cts, err := InstanceLoadAll(state)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	offset := state.OS.IdmapSet.Idmap[0].Hostid + 65536
+
+	mapentries := idmap.ByHostid{}
+	for _, container := range cts {
+		name := container.Name()
+
+		/* Don't change our map Just Because. */
+		if name == cName {
+			continue
+		}
+
+		if container.IsPrivileged() {
+			continue
+		}
+
+		if !shared.IsTrue(container.ExpandedConfig()["security.idmap.isolated"]) {
+			continue
+		}
+
+		cBase := int64(0)
+		if container.ExpandedConfig()["volatile.idmap.base"] != "" {
+			cBase, err = strconv.ParseInt(container.ExpandedConfig()["volatile.idmap.base"], 10, 64)
+			if err != nil {
+				return nil, 0, err
+			}
+		}
+
+		cSize, err := idmapSize(state, container.ExpandedConfig()["security.idmap.isolated"], container.ExpandedConfig()["security.idmap.size"])
+		if err != nil {
+			return nil, 0, err
+		}
+
+		mapentries = append(mapentries, &idmap.IdmapEntry{Hostid: int64(cBase), Maprange: cSize})
+	}
+
+	sort.Sort(mapentries)
+
+	for i := range mapentries {
+		if i == 0 {
+			if mapentries[0].Hostid < offset+size {
+				offset = mapentries[0].Hostid + mapentries[0].Maprange
+				continue
+			}
+
+			set, err := mkIdmap(offset, size)
+			if err != nil && err == idmap.ErrHostIdIsSubId {
+				return nil, 0, err
+			}
+
+			return set, offset, nil
+		}
+
+		if mapentries[i-1].Hostid+mapentries[i-1].Maprange > offset {
+			offset = mapentries[i-1].Hostid + mapentries[i-1].Maprange
+			continue
+		}
+
+		offset = mapentries[i-1].Hostid + mapentries[i-1].Maprange
+		if offset+size < mapentries[i].Hostid {
+			set, err := mkIdmap(offset, size)
+			if err != nil && err == idmap.ErrHostIdIsSubId {
+				return nil, 0, err
+			}
+
+			return set, offset, nil
+		}
+		offset = mapentries[i].Hostid + mapentries[i].Maprange
+	}
+
+	if offset+size < state.OS.IdmapSet.Idmap[0].Hostid+state.OS.IdmapSet.Idmap[0].Maprange {
+		set, err := mkIdmap(offset, size)
+		if err != nil && err == idmap.ErrHostIdIsSubId {
+			return nil, 0, err
+		}
+
+		return set, offset, nil
+	}
+
+	return nil, 0, fmt.Errorf("Not enough uid/gid available for the container")
+}
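+
+// Illustrative allocation (assuming a host map starting at uid/gid 100000
+// and the default 65536-id isolated size): the first isolated container is
+// offered base 165536, the next 231072, and so on, skipping ranges that
+// existing isolated containers already occupy.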
+
+func (c *ContainerLXC) init() error {
+	// Compute the expanded config and device list
+	err := c.ExpandConfig(nil)
+	if err != nil {
+		return err
+	}
+
+	err = c.ExpandDevices(nil)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// InitLXC initializes the underlying go-lxc struct, optionally applying the full container config.
+func (c *ContainerLXC) InitLXC(config bool) error {
+	// No need to go through all that for snapshots
+	if c.IsSnapshot() {
+		return nil
+	}
+
+	// Check if being called from a hook
+	if c.fromHook {
+		return fmt.Errorf("You can't use go-lxc from inside a LXC hook")
+	}
+
+	// Check if already initialized
+	if c.c != nil {
+		if !config || c.cConfig {
+			return nil
+		}
+	}
+
+	// Load the go-lxc struct
+	cname := project.Prefix(c.Project(), c.Name())
+	cc, err := lxc.NewContainer(cname, c.state.OS.LxcPath)
+	if err != nil {
+		return err
+	}
+
+	freeContainer := true
+	defer func() {
+		if freeContainer {
+			cc.Release()
+		}
+	}()
+
+	// Setup logging
+	logfile := c.LogFilePath()
+	err = lxcSetConfigItem(cc, "lxc.log.file", logfile)
+	if err != nil {
+		return err
+	}
+
+	logLevel := "warn"
+	if daemon.Debug {
+		logLevel = "trace"
+	} else if daemon.Verbose {
+		logLevel = "info"
+	}
+
+	err = lxcSetConfigItem(cc, "lxc.log.level", logLevel)
+	if err != nil {
+		return err
+	}
+
+	if util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
+		// Default size log buffer
+		err = lxcSetConfigItem(cc, "lxc.console.buffer.size", "auto")
+		if err != nil {
+			return err
+		}
+
+		err = lxcSetConfigItem(cc, "lxc.console.size", "auto")
+		if err != nil {
+			return err
+		}
+
+		// File to dump ringbuffer contents to when requested or
+		// container shutdown.
+		consoleBufferLogFile := c.ConsoleBufferLogPath()
+		err = lxcSetConfigItem(cc, "lxc.console.logfile", consoleBufferLogFile)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Allow for lightweight init
+	c.cConfig = config
+	if !config {
+		if c.c != nil {
+			c.c.Release()
+		}
+
+		c.c = cc
+		freeContainer = false
+		return nil
+	}
+
+	if c.IsPrivileged() {
+		// Base config
+		toDrop := "sys_time sys_module sys_rawio"
+		if !c.state.OS.AppArmorStacking || c.state.OS.AppArmorStacked {
+			toDrop = toDrop + " mac_admin mac_override"
+		}
+
+		err = lxcSetConfigItem(cc, "lxc.cap.drop", toDrop)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Set up appropriate /proc, /sys and /sys/fs/cgroup mounts
+	mounts := []string{}
+	if c.IsPrivileged() && !c.state.OS.RunningInUserNS {
+		mounts = append(mounts, "proc:mixed")
+		mounts = append(mounts, "sys:mixed")
+	} else {
+		mounts = append(mounts, "proc:rw")
+		mounts = append(mounts, "sys:rw")
+	}
+
+	if !shared.PathExists("/proc/self/ns/cgroup") {
+		mounts = append(mounts, "cgroup:mixed")
+	}
+
+	err = lxcSetConfigItem(cc, "lxc.mount.auto", strings.Join(mounts, " "))
+	if err != nil {
+		return err
+	}
+
+	err = lxcSetConfigItem(cc, "lxc.autodev", "1")
+	if err != nil {
+		return err
+	}
+
+	err = lxcSetConfigItem(cc, "lxc.pty.max", "1024")
+	if err != nil {
+		return err
+	}
+
+	bindMounts := []string{
+		"/dev/fuse",
+		"/dev/net/tun",
+		"/proc/sys/fs/binfmt_misc",
+		"/sys/firmware/efi/efivars",
+		"/sys/fs/fuse/connections",
+		"/sys/fs/pstore",
+		"/sys/kernel/debug",
+		"/sys/kernel/security"}
+
+	if c.IsPrivileged() && !c.state.OS.RunningInUserNS {
+		err = lxcSetConfigItem(cc, "lxc.mount.entry", "mqueue dev/mqueue mqueue rw,relatime,create=dir,optional 0 0")
+		if err != nil {
+			return err
+		}
+	} else {
+		bindMounts = append(bindMounts, "/dev/mqueue")
+	}
+
+	for _, mnt := range bindMounts {
+		if !shared.PathExists(mnt) {
+			continue
+		}
+
+		if shared.IsDir(mnt) {
+			err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none rbind,create=dir,optional 0 0", mnt, strings.TrimPrefix(mnt, "/")))
+			if err != nil {
+				return err
+			}
+		} else {
+			err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,create=file,optional 0 0", mnt, strings.TrimPrefix(mnt, "/")))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// For lxcfs
+	templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG")
+	if templateConfDir == "" {
+		templateConfDir = "/usr/share/lxc/config"
+	}
+
+	if shared.PathExists(fmt.Sprintf("%s/common.conf.d/", templateConfDir)) {
+		err = lxcSetConfigItem(cc, "lxc.include", fmt.Sprintf("%s/common.conf.d/", templateConfDir))
+		if err != nil {
+			return err
+		}
+	}
+
+	// Configure devices cgroup
+	if c.IsPrivileged() && !c.state.OS.RunningInUserNS && c.state.OS.CGroupDevicesController {
+		err = lxcSetConfigItem(cc, "lxc.cgroup.devices.deny", "a")
+		if err != nil {
+			return err
+		}
+
+		devices := []string{
+			"b *:* m",      // Allow mknod of block devices
+			"c *:* m",      // Allow mknod of char devices
+			"c 136:* rwm",  // /dev/pts devices
+			"c 1:3 rwm",    // /dev/null
+			"c 1:5 rwm",    // /dev/zero
+			"c 1:7 rwm",    // /dev/full
+			"c 1:8 rwm",    // /dev/random
+			"c 1:9 rwm",    // /dev/urandom
+			"c 5:0 rwm",    // /dev/tty
+			"c 5:1 rwm",    // /dev/console
+			"c 5:2 rwm",    // /dev/ptmx
+			"c 10:229 rwm", // /dev/fuse
+			"c 10:200 rwm", // /dev/net/tun
+		}
+
+		for _, dev := range devices {
+			err = lxcSetConfigItem(cc, "lxc.cgroup.devices.allow", dev)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if c.IsNesting() {
+		/*
+		 * mount extra /proc and /sys to work around kernel
+		 * restrictions on remounting them when covered
+		 */
+		err = lxcSetConfigItem(cc, "lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional 0 0")
+		if err != nil {
+			return err
+		}
+
+		err = lxcSetConfigItem(cc, "lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional 0 0")
+		if err != nil {
+			return err
+		}
+	}
+
+	// Setup architecture
+	personality, err := osarch.ArchitecturePersonality(c.architecture)
+	if err != nil {
+		personality, err = osarch.ArchitecturePersonality(c.state.OS.Architectures[0])
+		if err != nil {
+			return err
+		}
+	}
+
+	err = lxcSetConfigItem(cc, "lxc.arch", personality)
+	if err != nil {
+		return err
+	}
+
+	// Setup the hooks
+	err = lxcSetConfigItem(cc, "lxc.hook.version", "1")
+	if err != nil {
+		return err
+	}
+
+	err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("/proc/%d/exe callhook %s %d start", os.Getpid(), shared.VarPath(""), c.id))
+	if err != nil {
+		return err
+	}
+
+	err = lxcSetConfigItem(cc, "lxc.hook.stop", fmt.Sprintf("%s callhook %s %d stopns", c.state.OS.ExecPath, shared.VarPath(""), c.id))
+	if err != nil {
+		return err
+	}
+
+	err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %d stop", c.state.OS.ExecPath, shared.VarPath(""), c.id))
+	if err != nil {
+		return err
+	}
+
+	// Setup the console
+	err = lxcSetConfigItem(cc, "lxc.tty.max", "0")
+	if err != nil {
+		return err
+	}
+
+	// Setup the hostname
+	err = lxcSetConfigItem(cc, "lxc.uts.name", c.Name())
+	if err != nil {
+		return err
+	}
+
+	// Setup devlxd
+	if c.expandedConfig["security.devlxd"] == "" || shared.IsTrue(c.expandedConfig["security.devlxd"]) {
+		err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/lxd none bind,create=dir 0 0", shared.VarPath("devlxd")))
+		if err != nil {
+			return err
+		}
+	}
+
+	// Setup AppArmor
+	if c.state.OS.AppArmorAvailable {
+		if c.state.OS.AppArmorConfined || !c.state.OS.AppArmorAdmin {
+			// If confined but otherwise able to use AppArmor, use our own profile
+			curProfile := util.AppArmorProfile()
+			curProfile = strings.TrimSuffix(curProfile, " (enforce)")
+			err := lxcSetConfigItem(cc, "lxc.apparmor.profile", curProfile)
+			if err != nil {
+				return err
+			}
+		} else {
+			// If not currently confined, use the container's profile
+			profile := AAProfileFull(c)
+
+			/* In the nesting case, we want to enable the inside
+			 * LXD to load its profile. Unprivileged containers can
+			 * load profiles, but privileged containers cannot, so
+			 * let's not use a namespace so they can fall back to
+			 * the old way of nesting, i.e. using the parent's
+			 * profile.
+			 */
+			if c.state.OS.AppArmorStacking && !c.state.OS.AppArmorStacked {
+				profile = fmt.Sprintf("%s//&:%s:", profile, AANamespace(c))
+			}
+
+			err := lxcSetConfigItem(cc, "lxc.apparmor.profile", profile)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Setup Seccomp if necessary
+	if SeccompContainerNeedsPolicy(c) {
+		err = lxcSetConfigItem(cc, "lxc.seccomp.profile", SeccompProfilePath(c))
+		if err != nil {
+			return err
+		}
+
+		// Setup notification socket
+		// System requirement errors are handled during policy generation instead of here
+		ok, err := SeccompContainerNeedsIntercept(c)
+		if err == nil && ok {
+			err = lxcSetConfigItem(cc, "lxc.seccomp.notify.proxy", fmt.Sprintf("unix:%s", shared.VarPath("seccomp.socket")))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Setup idmap
+	idmapset, err := c.NextIdmap()
+	if err != nil {
+		return err
+	}
+
+	if idmapset != nil {
+		lines := idmapset.ToLxcString()
+		for _, line := range lines {
+			err := lxcSetConfigItem(cc, "lxc.idmap", line)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Setup environment
+	for k, v := range c.expandedConfig {
+		if strings.HasPrefix(k, "environment.") {
+			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Setup NVIDIA runtime
+	if shared.IsTrue(c.expandedConfig["nvidia.runtime"]) {
+		hookDir := os.Getenv("LXD_LXC_HOOK")
+		if hookDir == "" {
+			hookDir = "/usr/share/lxc/hooks"
+		}
+
+		hookPath := filepath.Join(hookDir, "nvidia")
+		if !shared.PathExists(hookPath) {
+			return fmt.Errorf("The NVIDIA LXC hook couldn't be found")
+		}
+
+		_, err := exec.LookPath("nvidia-container-cli")
+		if err != nil {
+			return fmt.Errorf("The NVIDIA container tools couldn't be found")
+		}
+
+		err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_VISIBLE_DEVICES=none")
+		if err != nil {
+			return err
+		}
+
+		nvidiaDriver := c.expandedConfig["nvidia.driver.capabilities"]
+		if nvidiaDriver == "" {
+			err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_DRIVER_CAPABILITIES=compute,utility")
+			if err != nil {
+				return err
+			}
+		} else {
+			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_DRIVER_CAPABILITIES=%s", nvidiaDriver))
+			if err != nil {
+				return err
+			}
+		}
+
+		nvidiaRequireCuda := c.expandedConfig["nvidia.require.cuda"]
+		if nvidiaRequireCuda == "" {
+			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_CUDA=%s", nvidiaRequireCuda))
+			if err != nil {
+				return err
+			}
+		}
+
+		nvidiaRequireDriver := c.expandedConfig["nvidia.require.driver"]
+		if nvidiaRequireDriver == "" {
+			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_DRIVER=%s", nvidiaRequireDriver))
+			if err != nil {
+				return err
+			}
+		}
+
+		err = lxcSetConfigItem(cc, "lxc.hook.mount", hookPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Memory limits
+	if c.state.OS.CGroupMemoryController {
+		memory := c.expandedConfig["limits.memory"]
+		memoryEnforce := c.expandedConfig["limits.memory.enforce"]
+		memorySwap := c.expandedConfig["limits.memory.swap"]
+		memorySwapPriority := c.expandedConfig["limits.memory.swap.priority"]
+
+		// Configure the memory limits
+		if memory != "" {
+			var valueInt int64
+			if strings.HasSuffix(memory, "%") {
+				percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64)
+				if err != nil {
+					return err
+				}
+
+				memoryTotal, err := shared.DeviceTotalMemory()
+				if err != nil {
+					return err
+				}
+
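+				// Convert the percentage of total memory into an absolute byte count.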
+				valueInt = int64((memoryTotal / 100) * percent)
+			} else {
+				valueInt, err = units.ParseByteSizeString(memory)
+				if err != nil {
+					return err
+				}
+			}
+
+			if memoryEnforce == "soft" {
+				err = lxcSetConfigItem(cc, "lxc.cgroup.memory.soft_limit_in_bytes", fmt.Sprintf("%d", valueInt))
+				if err != nil {
+					return err
+				}
+			} else {
+				if c.state.OS.CGroupSwapAccounting && (memorySwap == "" || shared.IsTrue(memorySwap)) {
+					err = lxcSetConfigItem(cc, "lxc.cgroup.memory.limit_in_bytes", fmt.Sprintf("%d", valueInt))
+					if err != nil {
+						return err
+					}
+					err = lxcSetConfigItem(cc, "lxc.cgroup.memory.memsw.limit_in_bytes", fmt.Sprintf("%d", valueInt))
+					if err != nil {
+						return err
+					}
+				} else {
+					err = lxcSetConfigItem(cc, "lxc.cgroup.memory.limit_in_bytes", fmt.Sprintf("%d", valueInt))
+					if err != nil {
+						return err
+					}
+				}
+				// Set soft limit to value 10% less than hard limit
+				err = lxcSetConfigItem(cc, "lxc.cgroup.memory.soft_limit_in_bytes", fmt.Sprintf("%.0f", float64(valueInt)*0.9))
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		// Configure the swappiness
+		if memorySwap != "" && !shared.IsTrue(memorySwap) {
+			err = lxcSetConfigItem(cc, "lxc.cgroup.memory.swappiness", "0")
+			if err != nil {
+				return err
+			}
+		} else if memorySwapPriority != "" {
+			priority, err := strconv.Atoi(memorySwapPriority)
+			if err != nil {
+				return err
+			}
+
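+			// Map swap priority 0-10 onto swappiness 50-60 (60 is the kernel default).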
+			err = lxcSetConfigItem(cc, "lxc.cgroup.memory.swappiness", fmt.Sprintf("%d", 60-10+priority))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// CPU limits
+	cpuPriority := c.expandedConfig["limits.cpu.priority"]
+	cpuAllowance := c.expandedConfig["limits.cpu.allowance"]
+
+	if (cpuPriority != "" || cpuAllowance != "") && c.state.OS.CGroupCPUController {
+		cpuShares, cpuCfsQuota, cpuCfsPeriod, err := device.ParseCPU(cpuAllowance, cpuPriority)
+		if err != nil {
+			return err
+		}
+
+		if cpuShares != "1024" {
+			err = lxcSetConfigItem(cc, "lxc.cgroup.cpu.shares", cpuShares)
+			if err != nil {
+				return err
+			}
+		}
+
+		if cpuCfsPeriod != "-1" {
+			err = lxcSetConfigItem(cc, "lxc.cgroup.cpu.cfs_period_us", cpuCfsPeriod)
+			if err != nil {
+				return err
+			}
+		}
+
+		if cpuCfsQuota != "-1" {
+			err = lxcSetConfigItem(cc, "lxc.cgroup.cpu.cfs_quota_us", cpuCfsQuota)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Processes
+	if c.state.OS.CGroupPidsController {
+		processes := c.expandedConfig["limits.processes"]
+		if processes != "" {
+			valueInt, err := strconv.ParseInt(processes, 10, 64)
+			if err != nil {
+				return err
+			}
+
+			err = lxcSetConfigItem(cc, "lxc.cgroup.pids.max", fmt.Sprintf("%d", valueInt))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Setup process limits
+	for k, v := range c.expandedConfig {
+		if strings.HasPrefix(k, "limits.kernel.") {
+			prlimitSuffix := strings.TrimPrefix(k, "limits.kernel.")
+			prlimitKey := fmt.Sprintf("lxc.prlimit.%s", prlimitSuffix)
+			err = lxcSetConfigItem(cc, prlimitKey, v)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Setup shmounts
+	if c.state.OS.LXCFeatures["mount_injection_file"] {
+		err = lxcSetConfigItem(cc, "lxc.mount.auto", fmt.Sprintf("shmounts:%s:/dev/.lxd-mounts", c.ShmountsPath()))
+	} else {
+		err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", c.ShmountsPath()))
+	}
+	if err != nil {
+		return err
+	}
+
+	// Apply raw.lxc
+	if lxcConfig, ok := c.expandedConfig["raw.lxc"]; ok {
+		f, err := ioutil.TempFile("", "lxd_config_")
+		if err != nil {
+			return err
+		}
+
+		err = shared.WriteAll(f, []byte(lxcConfig))
+		f.Close()
+		defer os.Remove(f.Name())
+		if err != nil {
+			return err
+		}
+
+		if err := cc.LoadConfigFile(f.Name()); err != nil {
+			return fmt.Errorf("Failed to load raw.lxc")
+		}
+	}
+
+	if c.c != nil {
+		c.c.Release()
+	}
+	c.c = cc
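+	// The new handle is now owned by the struct; clear freeContainer so it isn't freed on return.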
+	freeContainer = false
+
+	return nil
+}
+
+// runHooks executes the callback functions returned from a function.
+func (c *ContainerLXC) runHooks(hooks []func() error) error {
+	// Run any post start hooks.
+	if len(hooks) > 0 {
+		for _, hook := range hooks {
+			err := hook()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// deviceLoad instantiates and validates a new device and returns it along with enriched config.
+func (c *ContainerLXC) deviceLoad(deviceName string, rawConfig config.Device) (device.Device, config.Device, error) {
+	var configCopy config.Device
+	var err error
+
+	// Create copy of config and load some fields from volatile if device is nic or infiniband.
+	if shared.StringInSlice(rawConfig["type"], []string{"nic", "infiniband"}) {
+		configCopy, err = c.FillNetworkDevice(deviceName, rawConfig)
+		if err != nil {
+			return nil, nil, err
+		}
+	} else {
+		// Otherwise copy the config so it cannot be modified by the device.
+		configCopy = rawConfig.Clone()
+	}
+
+	d, err := device.New(c, c.state, deviceName, configCopy, c.deviceVolatileGetFunc(deviceName), c.deviceVolatileSetFunc(deviceName))
+
+	// Return the device and config copy even if an error occurs, as the caller may still use the device.
+	return d, configCopy, err
+}
+
+// deviceAdd loads a new device and calls its Add() function.
+func (c *ContainerLXC) deviceAdd(deviceName string, rawConfig config.Device) error {
+	d, _, err := c.deviceLoad(deviceName, rawConfig)
+	if err != nil {
+		return err
+	}
+
+	return d.Add()
+}
+
+// deviceStart loads a new device and calls its Start() function. It then processes the runtime
+// config returned from Start(), shifting mount ownership and, if the container is running,
+// live attaching mounts, cgroup rules and network interfaces.
+func (c *ContainerLXC) deviceStart(deviceName string, rawConfig config.Device, isRunning bool) (*device.RunConfig, error) {
+	d, configCopy, err := c.deviceLoad(deviceName, rawConfig)
+	if err != nil {
+		return nil, err
+	}
+
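+	// Devices that don't support hot plugging can only be started while the container is down.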
+	if canHotPlug, _ := d.CanHotPlug(); isRunning && !canHotPlug {
+		return nil, fmt.Errorf("Device cannot be started when container is running")
+	}
+
+	runConf, err := d.Start()
+	if err != nil {
+		return nil, err
+	}
+
+	// If runConf supplied, perform any container specific setup of device.
+	if runConf != nil {
+		// Shift device file ownership if needed before mounting into container.
+		// This needs to be done whether or not container is running.
+		if len(runConf.Mounts) > 0 {
+			err := c.deviceStaticShiftMounts(runConf.Mounts)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// If the container is running then live attach the device.
+		if isRunning {
+			// Attach mounts if requested.
+			if len(runConf.Mounts) > 0 {
+				err = c.deviceHandleMounts(runConf.Mounts)
+				if err != nil {
+					return nil, err
+				}
+			}
+
+			// Add cgroup rules if requested.
+			if len(runConf.CGroups) > 0 {
+				err = c.deviceAddCgroupRules(runConf.CGroups)
+				if err != nil {
+					return nil, err
+				}
+			}
+
+			// Attach network interface if requested.
+			if len(runConf.NetworkInterface) > 0 {
+				err = c.deviceAttachNIC(configCopy, runConf.NetworkInterface)
+				if err != nil {
+					return nil, err
+				}
+			}
+
+			// If running, run post start hooks now (if not running LXD will run them
+			// once the instance is started).
+			err = c.runHooks(runConf.PostHooks)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	return runConf, nil
+}
+
+// deviceStaticShiftMounts statically shift device mount files ownership to active idmap if needed.
+func (c *ContainerLXC) deviceStaticShiftMounts(mounts []device.MountEntryItem) error {
+	idmapSet, err := c.CurrentIdmap()
+	if err != nil {
+		return fmt.Errorf("Failed to get idmap for device: %s", err)
+	}
+
+	// If there is an idmap being applied and LXD is not running in a user namespace, then shift
+	// the device files before they are mounted.
+	if idmapSet != nil && !c.state.OS.RunningInUserNS {
+		for _, mount := range mounts {
+			// Skip UID/GID shifting if OwnerShift mode is not static, or the host-side
+			// DevPath is empty (meaning an unmount request that doesn't need shifting).
+			if mount.OwnerShift != device.MountOwnerShiftStatic || mount.DevPath == "" {
+				continue
+			}
+
+			err := idmapSet.ShiftFile(mount.DevPath)
+			if err != nil {
+				// uidshift failing is weird, but not a big problem. Log and proceed.
+				logger.Debugf("Failed to uidshift device %s: %s\n", mount.DevPath, err)
+			}
+		}
+	}
+
+	return nil
+}
+
+// deviceAddCgroupRules live adds cgroup rules to a container.
+func (c *ContainerLXC) deviceAddCgroupRules(cgroups []device.RunConfigItem) error {
+	for _, rule := range cgroups {
+		// Only apply devices cgroup rules if container is running privileged and host has devices cgroup controller.
+		if strings.HasPrefix(rule.Key, "devices.") && (!c.isCurrentlyPrivileged() || c.state.OS.RunningInUserNS || !c.state.OS.CGroupDevicesController) {
+			continue
+		}
+
+		// Add the new device cgroup rule.
+		err := c.CGroupSet(rule.Key, rule.Value)
+		if err != nil {
+			return fmt.Errorf("Failed to add cgroup rule for device")
+		}
+	}
+
+	return nil
+}
+
+// deviceAttachNIC live attaches a NIC device to a container.
+func (c *ContainerLXC) deviceAttachNIC(configCopy map[string]string, netIF []device.RunConfigItem) error {
+	devName := ""
+	for _, dev := range netIF {
+		if dev.Key == "link" {
+			devName = dev.Value
+			break
+		}
+	}
+
+	if devName == "" {
+		return fmt.Errorf("Device didn't provide a link property to use")
+	}
+
+	// Load the go-lxc struct.
+	err := c.InitLXC(false)
+	if err != nil {
+		return err
+	}
+
+	// Add the interface to the container.
+	err = c.c.AttachInterface(devName, configCopy["name"])
+	if err != nil {
+		return fmt.Errorf("Failed to attach interface: %s to %s: %s", devName, configCopy["name"], err)
+	}
+
+	return nil
+}
+
+// deviceUpdate loads a new device and calls its Update() function.
+func (c *ContainerLXC) deviceUpdate(deviceName string, rawConfig config.Device, oldDevices config.Devices, isRunning bool) error {
+	d, _, err := c.deviceLoad(deviceName, rawConfig)
+	if err != nil {
+		return err
+	}
+
+	err = d.Update(oldDevices, isRunning)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// deviceStop loads a new device and calls its Stop() function.
+func (c *ContainerLXC) deviceStop(deviceName string, rawConfig config.Device, stopHookNetnsPath string) error {
+	d, configCopy, err := c.deviceLoad(deviceName, rawConfig)
+
+	// If deviceLoad fails with unsupported device type then return.
+	if err == device.ErrUnsupportedDevType {
+		return err
+	}
+
+	// If deviceLoad fails for any other reason then just log the error and proceed, as a newer
+	// version of LXD may have stricter validation than the version that created the device, and
+	// we still need to allow previously valid devices to be stopped.
+	if err != nil {
+		// If there is no device returned, then we cannot proceed, so return as error.
+		if d == nil {
+			return fmt.Errorf("Device stop validation failed for '%s': %v", deviceName, err)
+		}
+
+		logger.Errorf("Device stop validation failed for '%s': %v", deviceName, err)
+	}
+
+	canHotPlug, _ := d.CanHotPlug()
+
+	// An empty netns path means we haven't been called from the LXC stop hook, so the container is running.
+	if stopHookNetnsPath == "" && !canHotPlug {
+		return fmt.Errorf("Device cannot be stopped when container is running")
+	}
+
+	runConf, err := d.Stop()
+	if err != nil {
+		return err
+	}
+
+	if runConf != nil {
+		// If network interface settings returned, then detach NIC from container.
+		if len(runConf.NetworkInterface) > 0 {
+			err = c.deviceDetachNIC(configCopy, runConf.NetworkInterface, stopHookNetnsPath)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Add cgroup rules if requested and container is running.
+		if len(runConf.CGroups) > 0 && stopHookNetnsPath == "" {
+			err = c.deviceAddCgroupRules(runConf.CGroups)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Detach mounts if requested and container is running.
+		if len(runConf.Mounts) > 0 && stopHookNetnsPath == "" {
+			err = c.deviceHandleMounts(runConf.Mounts)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Run post stop hooks irrespective of run state of instance.
+		err = c.runHooks(runConf.PostHooks)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// deviceDetachNIC detaches a NIC device from a container.
+func (c *ContainerLXC) deviceDetachNIC(configCopy map[string]string, netIF []device.RunConfigItem, stopHookNetnsPath string) error {
+	// Get requested device name to detach interface back to on the host.
+	devName := ""
+	for _, dev := range netIF {
+		if dev.Key == "link" {
+			devName = dev.Value
+			break
+		}
+	}
+
+	if devName == "" {
+		return fmt.Errorf("Device didn't provide a link property to use")
+	}
+
+	// If container is running, perform live detach of interface back to host.
+	if stopHookNetnsPath == "" {
+		// For some reason, having network config confuses detach, so get our own go-lxc struct.
+		cname := project.Prefix(c.Project(), c.Name())
+		cc, err := lxc.NewContainer(cname, c.state.OS.LxcPath)
+		if err != nil {
+			return err
+		}
+		defer cc.Release()
+
+		// Get interfaces inside container.
+		ifaces, err := cc.Interfaces()
+		if err != nil {
+			return fmt.Errorf("Failed to list network interfaces: %v", err)
+		}
+
+		// If interface doesn't exist inside container, cannot proceed.
+		if !shared.StringInSlice(configCopy["name"], ifaces) {
+			return nil
+		}
+
+		err = cc.DetachInterfaceRename(configCopy["name"], devName)
+		if err != nil {
+			return errors.Wrapf(err, "Failed to detach interface: %s to %s", configCopy["name"], devName)
+		}
+	} else {
+		// Currently liblxc does not move devices back to the host on stop if they were added
+		// after the container was started. For this reason we utilise the lxc.hook.stop
+		// hook so that we can capture the netns path, enter the namespace and move the NICs
+		// back to the host, renaming them if liblxc hasn't already done it.
+		// We can only move back devices that have an expected host_name record and where
+		// that device doesn't already exist on the host, because if a device exists on the
+		// host we can't know whether liblxc has already moved it back or whether it is a
+		// conflicting device.
+		if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", devName)) {
+			err := c.detachInterfaceRename(stopHookNetnsPath, configCopy["name"], devName)
+			if err != nil {
+				return errors.Wrapf(err, "Failed to detach interface: %s to %s", configCopy["name"], devName)
+			}
+		}
+	}
+
+	return nil
+}
+
+// deviceHandleMounts live attaches or detaches mounts on a container.
+// If the mount DevPath is empty the mount action is treated as unmount.
+func (c *ContainerLXC) deviceHandleMounts(mounts []device.MountEntryItem) error {
+	for _, mount := range mounts {
+		if mount.DevPath != "" {
+			flags := 0
+
+			// Convert options into flags.
+			for _, opt := range mount.Opts {
+				if opt == "bind" {
+					flags |= unix.MS_BIND
+				} else if opt == "rbind" {
+					flags |= unix.MS_BIND | unix.MS_REC
+				}
+			}
+
+			shiftfs := false
+			if mount.OwnerShift == device.MountOwnerShiftDynamic {
+				shiftfs = true
+			}
+
+			// Mount it into the container.
+			err := c.insertMount(mount.DevPath, mount.TargetPath, mount.FSType, flags, shiftfs)
+			if err != nil {
+				return fmt.Errorf("Failed to add mount for device inside container: %s", err)
+			}
+		} else {
+			relativeTargetPath := strings.TrimPrefix(mount.TargetPath, "/")
+			if c.FileExists(relativeTargetPath) == nil {
+				err := c.removeMount(mount.TargetPath)
+				if err != nil {
+					return fmt.Errorf("Error unmounting the device path inside container: %s", err)
+				}
+
+				err = c.FileRemove(relativeTargetPath)
+				if err != nil {
+					// Only warn here and don't fail, as removing a directory
+					// mount may fail if there were already files inside the
+					// directory before it was mounted over, preventing deletion.
+					logger.Warnf("Could not remove the device path inside container: %s", err)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// deviceRemove loads a new device and calls its Remove() function.
+func (c *ContainerLXC) deviceRemove(deviceName string, rawConfig config.Device) error {
+	d, _, err := c.deviceLoad(deviceName, rawConfig)
+
+	// If deviceLoad fails with unsupported device type then return.
+	if err == device.ErrUnsupportedDevType {
+		return err
+	}
+
+	// If deviceLoad fails for any other reason then just log the error and proceed, as a newer
+	// version of LXD may have stricter validation than the version that created the device, and
+	// we still need to allow previously valid devices to be removed.
+	if err != nil {
+		logger.Errorf("Device remove validation failed for '%s': %v", deviceName, err)
+	}
+
+	return d.Remove()
+}
+
+// deviceVolatileGetFunc returns a function that retrieves a named device's volatile config and
+// removes its device prefix from the keys.
+func (c *ContainerLXC) deviceVolatileGetFunc(devName string) func() map[string]string {
+	return func() map[string]string {
+		volatile := make(map[string]string)
+		prefix := fmt.Sprintf("volatile.%s.", devName)
+		for k, v := range c.localConfig {
+			if strings.HasPrefix(k, prefix) {
+				volatile[strings.TrimPrefix(k, prefix)] = v
+			}
+		}
+		return volatile
+	}
+}
+
+// deviceVolatileSetFunc returns a function that can be called to save a named device's volatile
+// config using keys that do not have the device's name prefixed.
+func (c *ContainerLXC) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
+	return func(save map[string]string) error {
+		volatileSave := make(map[string]string)
+		for k, v := range save {
+			volatileSave[fmt.Sprintf("volatile.%s.%s", devName, k)] = v
+		}
+
+		return c.VolatileSet(volatileSave)
+	}
+}
+
+// deviceResetVolatile resets a device's volatile data when it's removed, or updated in such a way
+// that it is removed and then added again immediately afterwards.
+func (c *ContainerLXC) deviceResetVolatile(devName string, oldConfig, newConfig config.Device) error {
+	volatileClear := make(map[string]string)
+	devicePrefix := fmt.Sprintf("volatile.%s.", devName)
+
+	// If the device type has changed, remove all old volatile keys.
+	// This will occur if the newConfig is empty (i.e. the device is actually being removed) or
+	// if the device type is being changed but keeping the same name.
+	if newConfig["type"] != oldConfig["type"] || newConfig["nictype"] != oldConfig["nictype"] {
+		for k := range c.localConfig {
+			if !strings.HasPrefix(k, devicePrefix) {
+				continue
+			}
+
+			volatileClear[k] = ""
+		}
+
+		return c.VolatileSet(volatileClear)
+	}
+
+	// If the device type remains the same, then just remove any volatile keys that have
+	// the same key name present in the new config (i.e. the new config is replacing the
+	// old volatile key).
+	for k := range c.localConfig {
+		if !strings.HasPrefix(k, devicePrefix) {
+			continue
+		}
+
+		devKey := strings.TrimPrefix(k, devicePrefix)
+		if _, found := newConfig[devKey]; found {
+			volatileClear[k] = ""
+		}
+	}
+
+	return c.VolatileSet(volatileClear)
+}
+
+// DeviceEventHandler actions the results of a RunConfig after an event has occurred on a device.
+func (c *ContainerLXC) DeviceEventHandler(runConf *device.RunConfig) error {
+	// Device events can only be processed when the container is running.
+	if !c.IsRunning() {
+		return nil
+	}
+
+	if runConf == nil {
+		return nil
+	}
+
+	// Shift device file ownership if needed before mounting devices into container.
+	if len(runConf.Mounts) > 0 {
+		err := c.deviceStaticShiftMounts(runConf.Mounts)
+		if err != nil {
+			return err
+		}
+
+		err = c.deviceHandleMounts(runConf.Mounts)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Add cgroup rules if requested.
+	if len(runConf.CGroups) > 0 {
+		err := c.deviceAddCgroupRules(runConf.CGroups)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Run any post hooks requested.
+	err := c.runHooks(runConf.PostHooks)
+	if err != nil {
+		return err
+	}
+
+	// Generate uevent inside container if requested.
+	if len(runConf.Uevents) > 0 {
+		for _, eventParts := range runConf.Uevents {
+			ueventArray := make([]string, 4)
+			ueventArray[0] = "forkuevent"
+			ueventArray[1] = "inject"
+			ueventArray[2] = fmt.Sprintf("%d", c.InitPID())
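+			// Compute the payload length: each part plus one extra byte.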
+			length := 0
+			for _, part := range eventParts {
+				length = length + len(part) + 1
+			}
+			ueventArray[3] = fmt.Sprintf("%d", length)
+			ueventArray = append(ueventArray, eventParts...)
+			_, err := shared.RunCommand(c.state.OS.ExecPath, ueventArray...)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// InitStorage initializes the storage interface for this container.
+func (c *ContainerLXC) InitStorage() error {
+	if c.storage != nil {
+		return nil
+	}
+
+	s, err := StoragePoolVolumeContainerLoadInit(c.state, c.Project(), c.Name())
+	if err != nil {
+		return err
+	}
+
+	c.storage = s
+
+	return nil
+}
+
+// Config handling
+func (c *ContainerLXC) ExpandConfig(profiles []api.Profile) error {
+	if profiles == nil && len(c.profiles) > 0 {
+		var err error
+		profiles, err = c.state.Cluster.ProfilesGet(c.project, c.profiles)
+		if err != nil {
+			return err
+		}
+	}
+
+	c.expandedConfig = db.ProfilesExpandConfig(c.localConfig, profiles)
+
+	return nil
+}
+
+func (c *ContainerLXC) ExpandDevices(profiles []api.Profile) error {
+	if profiles == nil && len(c.profiles) > 0 {
+		var err error
+		profiles, err = c.state.Cluster.ProfilesGet(c.project, c.profiles)
+		if err != nil {
+			return err
+		}
+	}
+
+	c.expandedDevices = db.ProfilesExpandDevices(c.localDevices, profiles)
+
+	return nil
+}
+
+func shiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet, shift bool) error {
+	var err error
+	roSubvols := []string{}
+	subvols, _ := driver.BTRFSSubVolumesGet(path)
+	sort.Sort(sort.StringSlice(subvols))
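+	// Temporarily make read-only subvolumes writable so their ownership can be shifted.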
+	for _, subvol := range subvols {
+		subvol = filepath.Join(path, subvol)
+
+		if !driver.BTRFSSubVolumeIsRo(subvol) {
+			continue
+		}
+
+		roSubvols = append(roSubvols, subvol)
+		driver.BTRFSSubVolumeMakeRw(subvol)
+	}
+
+	if shift {
+		err = diskIdmap.ShiftRootfs(path, nil)
+	} else {
+		err = diskIdmap.UnshiftRootfs(path, nil)
+	}
+
+	for _, subvol := range roSubvols {
+		driver.BTRFSSubVolumeMakeRo(subvol)
+	}
+
+	return err
+}
+
+func ShiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet) error {
+	return shiftBtrfsRootfs(path, diskIdmap, true)
+}
+
+func UnshiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet) error {
+	return shiftBtrfsRootfs(path, diskIdmap, false)
+}
+
+// Start functions
+func (c *ContainerLXC) startCommon() (string, []func() error, error) {
+	var ourStart bool
+	postStartHooks := []func() error{}
+
+	// Load the go-lxc struct
+	err := c.InitLXC(true)
+	if err != nil {
+		return "", postStartHooks, errors.Wrap(err, "Load go-lxc struct")
+	}
+
+	// Check that we're not already running
+	if c.IsRunning() {
+		return "", postStartHooks, fmt.Errorf("The container is already running")
+	}
+
+	// Load any required kernel modules
+	kernelModules := c.expandedConfig["linux.kernel_modules"]
+	if kernelModules != "" {
+		for _, module := range strings.Split(kernelModules, ",") {
+			module = strings.TrimPrefix(module, " ")
+			err := util.LoadModule(module)
+			if err != nil {
+				return "", postStartHooks, fmt.Errorf("Failed to load kernel module '%s': %s", module, err)
+			}
+		}
+	}
+
+	/* Deal with idmap changes */
+	nextIdmap, err := c.NextIdmap()
+	if err != nil {
+		return "", postStartHooks, errors.Wrap(err, "Set ID map")
+	}
+
+	diskIdmap, err := c.DiskIdmap()
+	if err != nil {
+		return "", postStartHooks, errors.Wrap(err, "Set last ID map")
+	}
+
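+	// Only remap the filesystem on disk when the idmap has changed and shiftfs can't shift an
+	// unshifted filesystem at mount time.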
+	if !nextIdmap.Equals(diskIdmap) && !(diskIdmap == nil && c.state.OS.Shiftfs) {
+		if shared.IsTrue(c.expandedConfig["security.protection.shift"]) {
+			return "", postStartHooks, fmt.Errorf("Container is protected against filesystem shifting")
+		}
+
+		logger.Debugf("Container idmap changed, remapping")
+		c.updateProgress("Remapping container filesystem")
+
+		ourStart, err = c.StorageStart()
+		if err != nil {
+			return "", postStartHooks, errors.Wrap(err, "Storage start")
+		}
+
+		if diskIdmap != nil {
+			if c.Storage().GetStorageType() == StorageTypeZfs {
+				err = diskIdmap.UnshiftRootfs(c.RootfsPath(), storage.ZFSIdmapSetSkipper)
+			} else if c.Storage().GetStorageType() == StorageTypeBtrfs {
+				err = UnshiftBtrfsRootfs(c.RootfsPath(), diskIdmap)
+			} else {
+				err = diskIdmap.UnshiftRootfs(c.RootfsPath(), nil)
+			}
+			if err != nil {
+				if ourStart {
+					c.StorageStop()
+				}
+				return "", postStartHooks, err
+			}
+		}
+
+		if nextIdmap != nil && !c.state.OS.Shiftfs {
+			if c.Storage().GetStorageType() == StorageTypeZfs {
+				err = nextIdmap.ShiftRootfs(c.RootfsPath(), storage.ZFSIdmapSetSkipper)
+			} else if c.Storage().GetStorageType() == StorageTypeBtrfs {
+				err = ShiftBtrfsRootfs(c.RootfsPath(), nextIdmap)
+			} else {
+				err = nextIdmap.ShiftRootfs(c.RootfsPath(), nil)
+			}
+			if err != nil {
+				if ourStart {
+					c.StorageStop()
+				}
+				return "", postStartHooks, err
+			}
+		}
+
+		jsonDiskIdmap := "[]"
+		if nextIdmap != nil && !c.state.OS.Shiftfs {
+			idmapBytes, err := json.Marshal(nextIdmap.Idmap)
+			if err != nil {
+				return "", postStartHooks, err
+			}
+			jsonDiskIdmap = string(idmapBytes)
+		}
+
+		err = c.VolatileSet(map[string]string{"volatile.last_state.idmap": jsonDiskIdmap})
+		if err != nil {
+			return "", postStartHooks, errors.Wrapf(err, "Set volatile.last_state.idmap config key on container %q (id %d)", c.name, c.id)
+		}
+
+		c.updateProgress("")
+	}
+
+	var idmapBytes []byte
+	if nextIdmap == nil {
+		idmapBytes = []byte("[]")
+	} else {
+		idmapBytes, err = json.Marshal(nextIdmap.Idmap)
+		if err != nil {
+			return "", postStartHooks, err
+		}
+	}
+
+	if c.localConfig["volatile.idmap.current"] != string(idmapBytes) {
+		err = c.VolatileSet(map[string]string{"volatile.idmap.current": string(idmapBytes)})
+		if err != nil {
+			return "", postStartHooks, errors.Wrapf(err, "Set volatile.idmap.current config key on container %q (id %d)", c.name, c.id)
+		}
+	}
+
+	// Generate the Seccomp profile
+	if err := SeccompCreateProfile(c); err != nil {
+		return "", postStartHooks, err
+	}
+
+	// Cleanup any existing leftover devices
+	c.removeUnixDevices()
+	c.removeDiskDevices()
+
+	// Create any missing directories.
+	err = os.MkdirAll(c.LogPath(), 0700)
+	if err != nil {
+		return "", postStartHooks, err
+	}
+
+	err = os.MkdirAll(c.DevicesPath(), 0711)
+	if err != nil {
+		return "", postStartHooks, err
+	}
+
+	err = os.MkdirAll(c.ShmountsPath(), 0711)
+	if err != nil {
+		return "", postStartHooks, err
+	}
+
+	// Create the devices
+	nicID := -1
+
+	// Setup devices in sorted order, this ensures that device mounts are added in path order.
+	for _, dev := range c.expandedDevices.Sorted() {
+		// Start the device.
+		runConf, err := c.deviceStart(dev.Name, dev.Config, false)
+		if err != nil {
+			return "", postStartHooks, errors.Wrapf(err, "Failed to start device '%s'", dev.Name)
+		}
+
+		if runConf == nil {
+			continue
+		}
+
+		// Process rootfs setup.
+		if runConf.RootFS.Path != "" {
+			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
+				// Set the rootfs backend type if supported (must happen before any other lxc.rootfs)
+				err := lxcSetConfigItem(c.c, "lxc.rootfs.backend", "dir")
+				if err == nil {
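+					// Read the value back to check liblxc accepted the backend; clear it if not.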
+					value := c.c.ConfigItem("lxc.rootfs.backend")
+					if len(value) == 0 || value[0] != "dir" {
+						lxcSetConfigItem(c.c, "lxc.rootfs.backend", "")
+					}
+				}
+			}
+
+			if util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
+				rootfsPath := fmt.Sprintf("dir:%s", runConf.RootFS.Path)
+				err = lxcSetConfigItem(c.c, "lxc.rootfs.path", rootfsPath)
+			} else {
+				err = lxcSetConfigItem(c.c, "lxc.rootfs", runConf.RootFS.Path)
+			}
+
+			if err != nil {
+				return "", postStartHooks, errors.Wrapf(err, "Failed to setup device rootfs '%s'", dev.Name)
+			}
+
+			if len(runConf.RootFS.Opts) > 0 {
+				err = lxcSetConfigItem(c.c, "lxc.rootfs.options", strings.Join(runConf.RootFS.Opts, ","))
+				if err != nil {
+					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device rootfs '%s'", dev.Name)
+				}
+			}
+
+			if c.state.OS.Shiftfs && !c.IsPrivileged() && diskIdmap == nil {
+				// Host side mark mount.
+				err = lxcSetConfigItem(c.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", c.RootfsPath(), c.RootfsPath()))
+				if err != nil {
+					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
+				}
+
+				// Container side shift mount.
+				err = lxcSetConfigItem(c.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", c.RootfsPath(), c.RootfsPath()))
+				if err != nil {
+					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
+				}
+
+				// Host side umount of mark mount.
+				err = lxcSetConfigItem(c.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", c.RootfsPath()))
+				if err != nil {
+					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
+				}
+			}
+		}
+
+		// Pass any cgroups rules into LXC.
+		if len(runConf.CGroups) > 0 {
+			for _, rule := range runConf.CGroups {
+				err = lxcSetConfigItem(c.c, fmt.Sprintf("lxc.cgroup.%s", rule.Key), rule.Value)
+				if err != nil {
+					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device cgroup '%s'", dev.Name)
+				}
+			}
+		}
+
+		// Pass any mounts into LXC.
+		if len(runConf.Mounts) > 0 {
+			for _, mount := range runConf.Mounts {
+				if shared.StringInSlice("propagation", mount.Opts) && !util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
+					return "", postStartHooks, errors.Wrapf(fmt.Errorf("liblxc 3.0 is required for mount propagation configuration"), "Failed to setup device mount '%s'", dev.Name)
+				}
+
+				if mount.OwnerShift == device.MountOwnerShiftDynamic && !c.IsPrivileged() {
+					if !c.state.OS.Shiftfs {
+						return "", postStartHooks, errors.Wrapf(fmt.Errorf("shiftfs is required but isn't supported on system"), "Failed to setup device mount '%s'", dev.Name)
+					}
+
+					err = lxcSetConfigItem(c.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", mount.DevPath, mount.DevPath))
+					if err != nil {
+						return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
+					}
+
+					err = lxcSetConfigItem(c.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", mount.DevPath, mount.DevPath))
+					if err != nil {
+						return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
+					}
+
+					err = lxcSetConfigItem(c.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", mount.DevPath))
+					if err != nil {
+						return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
+					}
+				}
+
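+				// Render the mount as an fstab-style entry for lxc.mount.entry.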
+				mntVal := fmt.Sprintf("%s %s %s %s %d %d", shared.EscapePathFstab(mount.DevPath), shared.EscapePathFstab(mount.TargetPath), mount.FSType, strings.Join(mount.Opts, ","), mount.Freq, mount.PassNo)
+				err = lxcSetConfigItem(c.c, "lxc.mount.entry", mntVal)
+				if err != nil {
+					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount '%s'", dev.Name)
+				}
+			}
+		}
+
+		// Pass any network setup config into LXC.
+		if len(runConf.NetworkInterface) > 0 {
+			// Increment nicID so that LXC network index is unique per device.
+			nicID++
+
+			networkKeyPrefix := "lxc.net"
+			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
+				networkKeyPrefix = "lxc.network"
+			}
+
+			for _, nicItem := range runConf.NetworkInterface {
+				err = lxcSetConfigItem(c.c, fmt.Sprintf("%s.%d.%s", networkKeyPrefix, nicID, nicItem.Key), nicItem.Value)
+				if err != nil {
+					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device network interface '%s'", dev.Name)
+				}
+			}
+		}
+
+		// Add any post start hooks.
+		if len(runConf.PostHooks) > 0 {
+			postStartHooks = append(postStartHooks, runConf.PostHooks...)
+		}
+	}
+
+	// Rotate the log file
+	logfile := c.LogFilePath()
+	if shared.PathExists(logfile) {
+		os.Remove(logfile + ".old")
+		err := os.Rename(logfile, logfile+".old")
+		if err != nil {
+			return "", postStartHooks, err
+		}
+	}
+
+	// Storage is guaranteed to be mountable now (must be called after devices setup).
+	ourStart, err = c.StorageStart()
+	if err != nil {
+		return "", postStartHooks, err
+	}
+
+	// Generate the LXC config
+	configPath := filepath.Join(c.LogPath(), "lxc.conf")
+	err = c.c.SaveConfigFile(configPath)
+	if err != nil {
+		os.Remove(configPath)
+		return "", postStartHooks, err
+	}
+
+	// Set ownership to match container root
+	currentIdmapset, err := c.CurrentIdmap()
+	if err != nil {
+		if ourStart {
+			c.StorageStop()
+		}
+		return "", postStartHooks, err
+	}
+
+	uid := int64(0)
+	if currentIdmapset != nil {
+		uid, _ = currentIdmapset.ShiftFromNs(0, 0)
+	}
+
+	err = os.Chown(c.Path(), int(uid), 0)
+	if err != nil {
+		if ourStart {
+			c.StorageStop()
+		}
+		return "", postStartHooks, err
+	}
+
+	// We only need traversal by root in the container
+	err = os.Chmod(c.Path(), 0100)
+	if err != nil {
+		if ourStart {
+			c.StorageStop()
+		}
+		return "", postStartHooks, err
+	}
+
+	// Update the backup.yaml file
+	err = WriteBackupFile(c)
+	if err != nil {
+		if ourStart {
+			c.StorageStop()
+		}
+		return "", postStartHooks, err
+	}
+
+	// If starting stateless, wipe state
+	if !c.IsStateful() && shared.PathExists(c.StatePath()) {
+		os.RemoveAll(c.StatePath())
+	}
+
+	// Unmount any previously mounted shiftfs
+	unix.Unmount(c.RootfsPath(), unix.MNT_DETACH)
+
+	return configPath, postStartHooks, nil
+}
+
+// detachInterfaceRename enters the container's network namespace and moves the named interface
+// in ifName back to the network namespace of the running process as the name specified in hostName.
+func (c *ContainerLXC) detachInterfaceRename(netns string, ifName string, hostName string) error {
+	lxdPID := os.Getpid()
+
+	// Run forknet detach
+	_, err := shared.RunCommand(
+		c.state.OS.ExecPath,
+		"forknet",
+		"detach",
+		netns,
+		fmt.Sprintf("%d", lxdPID),
+		ifName,
+		hostName,
+	)
+
+	// Process forknet detach response
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) Start(stateful bool) error {
+	var ctxMap log.Ctx
+
+	// Setup a new operation
+	op, err := c.createOperation("start", false, false)
+	if err != nil {
+		return errors.Wrap(err, "Create container start operation")
+	}
+	defer op.Done(nil)
+
+	err = daemon.SetupSharedMounts()
+	if err != nil {
+		return fmt.Errorf("Daemon failed to setup shared mounts base: %s.\nDoes security.nesting need to be turned on?", err)
+	}
+
+	// Run the shared start code
+	configPath, postStartHooks, err := c.startCommon()
+	if err != nil {
+		return errors.Wrap(err, "Common start logic")
+	}
+
+	// Ensure that the container storage volume is mounted.
+	_, err = c.StorageStart()
+	if err != nil {
+		return errors.Wrap(err, "Storage start")
+	}
+
+	ctxMap = log.Ctx{
+		"project":   c.project,
+		"name":      c.name,
+		"action":    op.action,
+		"created":   c.creationDate,
+		"ephemeral": c.ephemeral,
+		"used":      c.lastUsedDate,
+		"stateful":  stateful}
+
+	logger.Info("Starting container", ctxMap)
+
+	// If stateful, restore now
+	if stateful {
+		if !c.stateful {
+			return fmt.Errorf("Container has no existing state to restore")
+		}
+
+		criuMigrationArgs := CriuMigrationArgs{
+			Cmd:          lxc.MIGRATE_RESTORE,
+			StateDir:     c.StatePath(),
+			Function:     "snapshot",
+			Stop:         false,
+			ActionScript: false,
+			DumpDir:      "",
+			PreDumpDir:   "",
+		}
+
+		err := c.Migrate(&criuMigrationArgs)
+		if err != nil && !c.IsRunning() {
+			return errors.Wrap(err, "Migrate")
+		}
+
+		os.RemoveAll(c.StatePath())
+		c.stateful = false
+
+		err = c.state.Cluster.ContainerSetStateful(c.id, false)
+		if err != nil {
+			logger.Error("Failed starting container", ctxMap)
+			return errors.Wrap(err, "Start container")
+		}
+
+		// Run any post start hooks.
+		err = c.runHooks(postStartHooks)
+		if err != nil {
+			// Attempt to stop container.
+			op.Done(err)
+			c.Stop(false)
+			return err
+		}
+
+		logger.Info("Started container", ctxMap)
+		return nil
+	} else if c.stateful {
+		/* stateless start required when we have state, let's delete it */
+		err := os.RemoveAll(c.StatePath())
+		if err != nil {
+			return err
+		}
+
+		c.stateful = false
+		err = c.state.Cluster.ContainerSetStateful(c.id, false)
+		if err != nil {
+			return errors.Wrap(err, "Persist stateful flag")
+		}
+	}
+
+	name := project.Prefix(c.Project(), c.name)
+
+	// Start the LXC container
+	_, err = shared.RunCommand(
+		c.state.OS.ExecPath,
+		"forkstart",
+		name,
+		c.state.OS.LxcPath,
+		configPath)
+	if err != nil && !c.IsRunning() {
+		// Attempt to extract the LXC errors
+		lxcLog := ""
+		logPath := filepath.Join(c.LogPath(), "lxc.log")
+		if shared.PathExists(logPath) {
+			logContent, err := ioutil.ReadFile(logPath)
+			if err == nil {
+				for _, line := range strings.Split(string(logContent), "\n") {
+					fields := strings.Fields(line)
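+					// The log level is the third whitespace-separated field in lxc.log lines.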
+					if len(fields) < 4 {
+						continue
+					}
+
+					// We only care about errors
+					if fields[2] != "ERROR" {
+						continue
+					}
+
+					// Prepend a line break to the extracted log output
+					if len(lxcLog) == 0 {
+						lxcLog += "\n"
+					}
+
+					lxcLog += fmt.Sprintf("  %s\n", strings.Join(fields[0:], " "))
+				}
+			}
+		}
+
+		logger.Error("Failed starting container", ctxMap)
+
+		// Return the actual error
+		return err
+	}
+
+	// Run any post start hooks.
+	err = c.runHooks(postStartHooks)
+	if err != nil {
+		// Attempt to stop container.
+		op.Done(err)
+		c.Stop(false)
+		return err
+	}
+
+	logger.Info("Started container", ctxMap)
+	EventSendLifecycle(c.project, "container-started",
+		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+
+	return nil
+}
+
+func (c *ContainerLXC) OnStart() error {
+	// Make sure we can't call go-lxc functions by mistake
+	c.fromHook = true
+
+	// Start the storage for this container
+	ourStart, err := c.StorageStartSensitive()
+	if err != nil {
+		return err
+	}
+
+	// Load the container AppArmor profile
+	err = AALoadProfile(c)
+	if err != nil {
+		if ourStart {
+			c.StorageStop()
+		}
+		return err
+	}
+
+	// Template anything that needs templating
+	key := "volatile.apply_template"
+	if c.localConfig[key] != "" {
+		// Run any template that needs running
+		err = c.templateApplyNow(c.localConfig[key])
+		if err != nil {
+			AADestroy(c)
+			if ourStart {
+				c.StorageStop()
+			}
+			return err
+		}
+
+		// Remove the volatile key from the DB
+		err := c.state.Cluster.ContainerConfigRemove(c.id, key)
+		if err != nil {
+			AADestroy(c)
+			if ourStart {
+				c.StorageStop()
+			}
+			return err
+		}
+	}
+
+	err = c.templateApplyNow("start")
+	if err != nil {
+		AADestroy(c)
+		if ourStart {
+			c.StorageStop()
+		}
+		return err
+	}
+
+	// Trigger a rebalance
+	device.TaskSchedulerTrigger("container", c.name, "started")
+
+	// Apply network priority
+	if c.expandedConfig["limits.network.priority"] != "" {
+		go func(c *ContainerLXC) {
+			c.fromHook = false
+			err := c.setNetworkPriority()
+			if err != nil {
+				logger.Error("Failed to apply network priority", log.Ctx{"container": c.name, "err": err})
+			}
+		}(c)
+	}
+
+	// Database updates
+	err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Record current state
+		err = tx.ContainerSetState(c.id, "RUNNING")
+		if err != nil {
+			return errors.Wrap(err, "Error updating container state")
+		}
+
+		// Update the container's last used timestamp
+		err = tx.ContainerLastUsedUpdate(c.id, time.Now().UTC())
+		if err != nil {
+			return errors.Wrap(err, "Error updating last used")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Stop functions
+func (c *ContainerLXC) Stop(stateful bool) error {
+	var ctxMap log.Ctx
+
+	// Check that we're not already stopped
+	if !c.IsRunning() {
+		return fmt.Errorf("The container is already stopped")
+	}
+
+	// Setup a new operation
+	op, err := c.createOperation("stop", false, true)
+	if err != nil {
+		return err
+	}
+
+	ctxMap = log.Ctx{
+		"project":   c.project,
+		"name":      c.name,
+		"action":    op.action,
+		"created":   c.creationDate,
+		"ephemeral": c.ephemeral,
+		"used":      c.lastUsedDate,
+		"stateful":  stateful}
+
+	logger.Info("Stopping container", ctxMap)
+
+	// Handle stateful stop
+	if stateful {
+		// Cleanup any existing state
+		stateDir := c.StatePath()
+		os.RemoveAll(stateDir)
+
+		err := os.MkdirAll(stateDir, 0700)
+		if err != nil {
+			op.Done(err)
+			logger.Error("Failed stopping container", ctxMap)
+			return err
+		}
+
+		criuMigrationArgs := CriuMigrationArgs{
+			Cmd:          lxc.MIGRATE_DUMP,
+			StateDir:     stateDir,
+			Function:     "snapshot",
+			Stop:         true,
+			ActionScript: false,
+			DumpDir:      "",
+			PreDumpDir:   "",
+		}
+
+		// Checkpoint
+		err = c.Migrate(&criuMigrationArgs)
+		if err != nil {
+			op.Done(err)
+			logger.Error("Failed stopping container", ctxMap)
+			return err
+		}
+
+		err = op.Wait()
+		if err != nil && c.IsRunning() {
+			logger.Error("Failed stopping container", ctxMap)
+			return err
+		}
+
+		c.stateful = true
+		err = c.state.Cluster.ContainerSetStateful(c.id, true)
+		if err != nil {
+			op.Done(err)
+			logger.Error("Failed stopping container", ctxMap)
+			return err
+		}
+
+		op.Done(nil)
+		logger.Info("Stopped container", ctxMap)
+		EventSendLifecycle(c.project, "container-stopped",
+			fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+		return nil
+	} else if shared.PathExists(c.StatePath()) {
+		os.RemoveAll(c.StatePath())
+	}
+
+	// Load the go-lxc struct
+	if c.expandedConfig["raw.lxc"] != "" {
+		err = c.InitLXC(true)
+		if err != nil {
+			op.Done(err)
+			logger.Error("Failed stopping container", ctxMap)
+			return err
+		}
+	} else {
+		err = c.InitLXC(false)
+		if err != nil {
+			op.Done(err)
+			logger.Error("Failed stopping container", ctxMap)
+			return err
+		}
+	}
+
+	// Fork-bomb mitigation, prevent forking from this point on
+	if c.state.OS.CGroupPidsController {
+		// Attempt to disable forking new processes
+		c.CGroupSet("pids.max", "0")
+	} else if c.state.OS.CGroupFreezerController {
+		// Attempt to freeze the container
+		freezer := make(chan bool, 1)
+		go func() {
+			c.Freeze()
+			freezer <- true
+		}()
+
+		select {
+		case <-freezer:
+		case <-time.After(time.Second * 5):
+			c.Unfreeze()
+		}
+	}
+
+	if err := c.c.Stop(); err != nil {
+		op.Done(err)
+		logger.Error("Failed stopping container", ctxMap)
+		return err
+	}
+
+	err = op.Wait()
+	if err != nil && c.IsRunning() {
+		logger.Error("Failed stopping container", ctxMap)
+		return err
+	}
+
+	logger.Info("Stopped container", ctxMap)
+	EventSendLifecycle(c.project, "container-stopped",
+		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+
+	return nil
+}
+
+func (c *ContainerLXC) Shutdown(timeout time.Duration) error {
+	var ctxMap log.Ctx
+
+	// Check that we're not already stopped
+	if !c.IsRunning() {
+		return fmt.Errorf("The container is already stopped")
+	}
+
+	// Setup a new operation
+	op, err := c.createOperation("stop", true, true)
+	if err != nil {
+		return err
+	}
+
+	ctxMap = log.Ctx{
+		"project":   c.project,
+		"name":      c.name,
+		"action":    "shutdown",
+		"created":   c.creationDate,
+		"ephemeral": c.ephemeral,
+		"used":      c.lastUsedDate,
+		"timeout":   timeout}
+
+	logger.Info("Shutting down container", ctxMap)
+
+	// Load the go-lxc struct
+	if c.expandedConfig["raw.lxc"] != "" {
+		err = c.InitLXC(true)
+		if err != nil {
+			op.Done(err)
+			logger.Error("Failed stopping container", ctxMap)
+			return err
+		}
+	} else {
+		err = c.InitLXC(false)
+		if err != nil {
+			op.Done(err)
+			logger.Error("Failed stopping container", ctxMap)
+			return err
+		}
+	}
+
+	if err := c.c.Shutdown(timeout); err != nil {
+		op.Done(err)
+		logger.Error("Failed shutting down container", ctxMap)
+		return err
+	}
+
+	err = op.Wait()
+	if err != nil && c.IsRunning() {
+		logger.Error("Failed shutting down container", ctxMap)
+		return err
+	}
+
+	logger.Info("Shut down container", ctxMap)
+	EventSendLifecycle(c.project, "container-shutdown",
+		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+
+	return nil
+}
+
+// OnStopNS is triggered by LXC's stop hook once a container is shut down but before the
+// container's namespaces have been closed. The netns path of the stopped container is provided.
+func (c *ContainerLXC) OnStopNS(target string, netns string) error {
+	// Validate target
+	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
+		logger.Error("Container sent invalid target to OnStopNS", log.Ctx{"container": c.Name(), "target": target})
+		return fmt.Errorf("Invalid stop target: %s", target)
+	}
+
+	// Clean up devices.
+	c.cleanupDevices(netns)
+
+	return nil
+}
+
+// OnStop is triggered by LXC's post-stop hook once a container is shut down and after the
+// container's namespaces have been closed.
+func (c *ContainerLXC) OnStop(target string) error {
+	// Validate target
+	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
+		logger.Error("Container sent invalid target to OnStop", log.Ctx{"container": c.Name(), "target": target})
+		return fmt.Errorf("Invalid stop target: %s", target)
+	}
+
+	// Get operation
+	op, _ := c.getOperation("")
+	if op != nil && op.action != "stop" {
+		return fmt.Errorf("Container is already running a %s operation", op.action)
+	}
+
+	// Make sure we can't call go-lxc functions by mistake
+	c.fromHook = true
+
+	// Remove directory ownership (to avoid issue if uidmap is re-used)
+	err := os.Chown(c.Path(), 0, 0)
+	if err != nil {
+		if op != nil {
+			op.Done(err)
+		}
+
+		return err
+	}
+
+	err = os.Chmod(c.Path(), 0100)
+	if err != nil {
+		if op != nil {
+			op.Done(err)
+		}
+
+		return err
+	}
+
+	// Stop the storage for this container
+	_, err = c.StorageStop()
+	if err != nil {
+		if op != nil {
+			op.Done(err)
+		}
+
+		return err
+	}
+
+	// Log user actions
+	if op == nil {
+		ctxMap := log.Ctx{
+			"project":   c.project,
+			"name":      c.name,
+			"action":    target,
+			"created":   c.creationDate,
+			"ephemeral": c.ephemeral,
+			"used":      c.lastUsedDate,
+			"stateful":  false}
+
+		logger.Info(fmt.Sprintf("Container initiated %s", target), ctxMap)
+	}
+
+	// Record power state
+	err = c.state.Cluster.ContainerSetState(c.id, "STOPPED")
+	if err != nil {
+		logger.Error("Failed to set container state", log.Ctx{"container": c.Name(), "err": err})
+	}
+
+	go func(c *ContainerLXC, target string, op *lxcContainerOperation) {
+		c.fromHook = false
+		err = nil
+
+		// Unlock on return
+		if op != nil {
+			defer op.Done(err)
+		}
+
+		// Wait for other post-stop actions to be done
+		c.IsRunning()
+
+		// Unload the apparmor profile
+		err = AADestroy(c)
+		if err != nil {
+			logger.Error("Failed to destroy apparmor namespace", log.Ctx{"container": c.Name(), "err": err})
+		}
+
+		// Clean all the unix devices
+		err = c.removeUnixDevices()
+		if err != nil {
+			logger.Error("Unable to remove unix devices", log.Ctx{"container": c.Name(), "err": err})
+		}
+
+		// Clean all the disk devices
+		err = c.removeDiskDevices()
+		if err != nil {
+			logger.Error("Unable to remove disk devices", log.Ctx{"container": c.Name(), "err": err})
+		}
+
+		// Reboot the container
+		if target == "reboot" {
+			// Start the container again
+			err = c.Start(false)
+			return
+		}
+
+		// Trigger a rebalance
+		device.TaskSchedulerTrigger("container", c.name, "stopped")
+
+		// Destroy ephemeral containers
+		if c.ephemeral {
+			err = c.Delete()
+		}
+	}(c, target, op)
+
+	return nil
+}
+
+// cleanupDevices performs any needed device cleanup steps when the container is stopped.
+func (c *ContainerLXC) cleanupDevices(netns string) {
+	for _, dev := range c.expandedDevices.Sorted() {
+		// Use the device interface if device supports it.
+		err := c.deviceStop(dev.Name, dev.Config, netns)
+		if err == device.ErrUnsupportedDevType {
+			continue
+		} else if err != nil {
+			logger.Errorf("Failed to stop device '%s': %v", dev.Name, err)
+		}
+	}
+}
+
+// Freezer functions
+func (c *ContainerLXC) Freeze() error {
+	ctxMap := log.Ctx{
+		"project":   c.project,
+		"name":      c.name,
+		"created":   c.creationDate,
+		"ephemeral": c.ephemeral,
+		"used":      c.lastUsedDate}
+
+	// Check that we're running
+	if !c.IsRunning() {
+		return fmt.Errorf("The container isn't running")
+	}
+
+	// Check if the CGroup is available
+	if !c.state.OS.CGroupFreezerController {
+		logger.Info("Unable to freeze container (lack of kernel support)", ctxMap)
+		return nil
+	}
+
+	// Check that we're not already frozen
+	if c.IsFrozen() {
+		return fmt.Errorf("The container is already frozen")
+	}
+
+	logger.Info("Freezing container", ctxMap)
+
+	// Load the go-lxc struct
+	err := c.InitLXC(false)
+	if err != nil {
+		ctxMap["err"] = err
+		logger.Error("Failed freezing container", ctxMap)
+		return err
+	}
+
+	err = c.c.Freeze()
+	if err != nil {
+		ctxMap["err"] = err
+		logger.Error("Failed freezing container", ctxMap)
+		return err
+	}
+
+	logger.Info("Froze container", ctxMap)
+	EventSendLifecycle(c.project, "container-paused",
+		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+
+	return err
+}
+
+func (c *ContainerLXC) Unfreeze() error {
+	ctxMap := log.Ctx{
+		"project":   c.project,
+		"name":      c.name,
+		"created":   c.creationDate,
+		"ephemeral": c.ephemeral,
+		"used":      c.lastUsedDate}
+
+	// Check that we're running
+	if !c.IsRunning() {
+		return fmt.Errorf("The container isn't running")
+	}
+
+	// Check if the CGroup is available
+	if !c.state.OS.CGroupFreezerController {
+		logger.Info("Unable to unfreeze container (lack of kernel support)", ctxMap)
+		return nil
+	}
+
+	// Check that we're frozen
+	if !c.IsFrozen() {
+		return fmt.Errorf("The container is already running")
+	}
+
+	logger.Info("Unfreezing container", ctxMap)
+
+	// Load the go-lxc struct
+	err := c.InitLXC(false)
+	if err != nil {
+		logger.Error("Failed unfreezing container", ctxMap)
+		return err
+	}
+
+	err = c.c.Unfreeze()
+	if err != nil {
+		logger.Error("Failed unfreezing container", ctxMap)
+	}
+
+	logger.Info("Unfroze container", ctxMap)
+	EventSendLifecycle(c.project, "container-resumed",
+		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+
+	return err
+}
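+
+// Usage sketch (hypothetical caller, not part of this patch): the freezer
+// pair above is meant to bracket host-side operations, e.g.
+//
+//	if err := c.Freeze(); err != nil {
+//		return err
+//	}
+//	defer c.Unfreeze()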
+
+var LxcMonitorStateError = fmt.Errorf("Monitor is hung")
+
+// Get lxc container state, with a 5 second timeout.
+// If we don't get a reply, assume the lxc monitor is hung.
+func (c *ContainerLXC) getLxcState() (lxc.State, error) {
+	if c.IsSnapshot() {
+		return lxc.StateMap["STOPPED"], nil
+	}
+
+	// Load the go-lxc struct
+	err := c.InitLXC(false)
+	if err != nil {
+		return lxc.StateMap["STOPPED"], err
+	}
+
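+	// The monitor channel is buffered (capacity 1) so the goroutine below can
+	// complete its send and exit even if we hit the timeout and never read;
+	// an unbuffered channel would leak the goroutine on timeout.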
+	monitor := make(chan lxc.State, 1)
+
+	go func(c *lxc.Container) {
+		monitor <- c.State()
+	}(c.c)
+
+	select {
+	case state := <-monitor:
+		return state, nil
+	case <-time.After(5 * time.Second):
+		return lxc.StateMap["FROZEN"], LxcMonitorStateError
+	}
+}
+
+func (c *ContainerLXC) Render() (interface{}, interface{}, error) {
+	// Ignore err as the arch string on error is correct (unknown)
+	architectureName, _ := osarch.ArchitectureName(c.architecture)
+
+	if c.IsSnapshot() {
+		// Prepare the ETag
+		etag := []interface{}{c.expiryDate}
+
+		ct := api.InstanceSnapshot{
+			CreatedAt:       c.creationDate,
+			ExpandedConfig:  c.expandedConfig,
+			ExpandedDevices: c.expandedDevices.CloneNative(),
+			LastUsedAt:      c.lastUsedDate,
+			Name:            strings.SplitN(c.name, "/", 2)[1],
+			Stateful:        c.stateful,
+		}
+		ct.Architecture = architectureName
+		ct.Config = c.localConfig
+		ct.Devices = c.localDevices.CloneNative()
+		ct.Ephemeral = c.ephemeral
+		ct.Profiles = c.profiles
+		ct.ExpiresAt = c.expiryDate
+
+		return &ct, etag, nil
+	}
+
+	// Prepare the ETag
+	etag := []interface{}{c.architecture, c.localConfig, c.localDevices, c.ephemeral, c.profiles}
+
+	// FIXME: Render shouldn't directly access the go-lxc struct
+	cState, err := c.getLxcState()
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "Get container stated")
+	}
+	statusCode := lxcStatusCode(cState)
+
+	ct := api.Instance{
+		ExpandedConfig:  c.expandedConfig,
+		ExpandedDevices: c.expandedDevices.CloneNative(),
+		Name:            c.name,
+		Status:          statusCode.String(),
+		StatusCode:      statusCode,
+		Location:        c.node,
+		Type:            c.Type().String(),
+	}
+
+	ct.Description = c.description
+	ct.Architecture = architectureName
+	ct.Config = c.localConfig
+	ct.CreatedAt = c.creationDate
+	ct.Devices = c.localDevices.CloneNative()
+	ct.Ephemeral = c.ephemeral
+	ct.LastUsedAt = c.lastUsedDate
+	ct.Profiles = c.profiles
+	ct.Stateful = c.stateful
+
+	return &ct, etag, nil
+}
+
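+// RenderFull renders the container along with its state, snapshots and
+// backups. It also shows the type assertion expected of Render callers:
+// containers render to *api.Instance, snapshots to *api.InstanceSnapshot.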
+func (c *ContainerLXC) RenderFull() (*api.InstanceFull, interface{}, error) {
+	if c.IsSnapshot() {
+		return nil, nil, fmt.Errorf("RenderFull only works with containers")
+	}
+
+	// Get the Container struct
+	base, etag, err := c.Render()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Convert to ContainerFull
+	ct := api.InstanceFull{Instance: *base.(*api.Instance)}
+
+	// Add the ContainerState
+	ct.State, err = c.RenderState()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Add the ContainerSnapshots
+	snaps, err := c.Snapshots()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, snap := range snaps {
+		render, _, err := snap.Render()
+		if err != nil {
+			return nil, nil, err
+		}
+
+		if ct.Snapshots == nil {
+			ct.Snapshots = []api.InstanceSnapshot{}
+		}
+
+		ct.Snapshots = append(ct.Snapshots, *render.(*api.InstanceSnapshot))
+	}
+
+	// Add the ContainerBackups
+	backups, err := c.Backups()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, backup := range backups {
+		render := backup.Render()
+
+		if ct.Backups == nil {
+			ct.Backups = []api.InstanceBackup{}
+		}
+
+		ct.Backups = append(ct.Backups, *render)
+	}
+
+	return &ct, etag, nil
+}
+
+func (c *ContainerLXC) RenderState() (*api.InstanceState, error) {
+	cState, err := c.getLxcState()
+	if err != nil {
+		return nil, err
+	}
+	statusCode := lxcStatusCode(cState)
+	status := api.InstanceState{
+		Status:     statusCode.String(),
+		StatusCode: statusCode,
+	}
+
+	if c.IsRunning() {
+		pid := c.InitPID()
+		status.CPU = c.cpuState()
+		status.Disk = c.diskState()
+		status.Memory = c.memoryState()
+		status.Network = c.networkState()
+		status.Pid = int64(pid)
+		status.Processes = c.processesState()
+	}
+
+	return &status, nil
+}
+
+func (c *ContainerLXC) Snapshots() ([]Instance, error) {
+	var snaps []db.Instance
+
+	if c.IsSnapshot() {
+		return []Instance{}, nil
+	}
+
+	// Get all the snapshots
+	err := c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		snaps, err = tx.ContainerGetSnapshotsFull(c.Project(), c.name)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the snapshot list
+	containers, err := instanceLoadAllInternal(snaps, c.state)
+	if err != nil {
+		return nil, err
+	}
+
+	instances := make([]Instance, len(containers))
+	for k, v := range containers {
+		instances[k] = Instance(v)
+	}
+
+	return instances, nil
+}
+
+func (c *ContainerLXC) Backups() ([]Backup, error) {
+	// Get all the backups
+	backupNames, err := c.state.Cluster.ContainerGetBackups(c.project, c.name)
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the backup list
+	backups := []Backup{}
+	for _, backupName := range backupNames {
+		backup, err := BackupLoadByName(c.state, c.project, backupName)
+		if err != nil {
+			return nil, err
+		}
+
+		backups = append(backups, *backup)
+	}
+
+	return backups, nil
+}
+
+func (c *ContainerLXC) Restore(sourceContainer Instance, stateful bool) error {
+	var ctxMap log.Ctx
+
+	// Initialize storage interface for the container.
+	err := c.InitStorage()
+	if err != nil {
+		return err
+	}
+
+	ourStart, err := c.StorageStart()
+	if err != nil {
+		return err
+	}
+	if ourStart {
+		defer c.StorageStop()
+	}
+
+	/* let's also check for CRIU if necessary, before doing a bunch of
+	 * filesystem manipulations
+	 */
+	if shared.PathExists(c.StatePath()) {
+		_, err := exec.LookPath("criu")
+		if err != nil {
+			return fmt.Errorf("Failed to restore container state. CRIU isn't installed")
+		}
+	}
+
+	// Stop the container
+	wasRunning := false
+	if c.IsRunning() {
+		wasRunning = true
+
+		ephemeral := c.IsEphemeral()
+		if ephemeral {
+			// Unset ephemeral flag
+			args := db.ContainerArgs{
+				Architecture: c.Architecture(),
+				Config:       c.LocalConfig(),
+				Description:  c.Description(),
+				Devices:      c.LocalDevices(),
+				Ephemeral:    false,
+				Profiles:     c.Profiles(),
+				Project:      c.Project(),
+				Type:         c.Type(),
+				Snapshot:     c.IsSnapshot(),
+			}
+
+			err := c.Update(args, false)
+			if err != nil {
+				return err
+			}
+
+			// On function return, set the flag back on
+			defer func() {
+				args.Ephemeral = ephemeral
+				c.Update(args, true)
+			}()
+		}
+
+		// This will unmount the container storage.
+		err := c.Stop(false)
+		if err != nil {
+			return err
+		}
+
+		// Ensure that storage is mounted for state path checks.
+		ourStart, err := c.StorageStart()
+		if err != nil {
+			return err
+		}
+		if ourStart {
+			defer c.StorageStop()
+		}
+	}
+
+	ctxMap = log.Ctx{
+		"project":   c.project,
+		"name":      c.name,
+		"created":   c.creationDate,
+		"ephemeral": c.ephemeral,
+		"used":      c.lastUsedDate,
+		"source":    sourceContainer.Name()}
+
+	logger.Info("Restoring container", ctxMap)
+
+	// Restore the rootfs
+	err = c.storage.ContainerRestore(c, sourceContainer)
+	if err != nil {
+		logger.Error("Failed restoring container filesystem", ctxMap)
+		return err
+	}
+
+	// Restore the configuration
+	args := db.ContainerArgs{
+		Architecture: sourceContainer.Architecture(),
+		Config:       sourceContainer.LocalConfig(),
+		Description:  sourceContainer.Description(),
+		Devices:      sourceContainer.LocalDevices(),
+		Ephemeral:    sourceContainer.IsEphemeral(),
+		Profiles:     sourceContainer.Profiles(),
+		Project:      sourceContainer.Project(),
+		Type:         sourceContainer.Type(),
+		Snapshot:     sourceContainer.IsSnapshot(),
+	}
+
+	err = c.Update(args, false)
+	if err != nil {
+		logger.Error("Failed restoring container configuration", ctxMap)
+		return err
+	}
+
+	// The old backup file may be out of date (e.g. it doesn't have all the
+	// current snapshots of the container listed); let's write a new one to
+	// be safe.
+	err = WriteBackupFile(c)
+	if err != nil {
+		return err
+	}
+
+	// If the container wasn't running but was stateful, should we restore
+	// it as running?
+	if stateful {
+		if !shared.PathExists(c.StatePath()) {
+			return fmt.Errorf("Stateful snapshot restore requested but snapshot is stateless")
+		}
+
+		logger.Debug("Performing stateful restore", ctxMap)
+		c.stateful = true
+
+		criuMigrationArgs := CriuMigrationArgs{
+			Cmd:          lxc.MIGRATE_RESTORE,
+			StateDir:     c.StatePath(),
+			Function:     "snapshot",
+			Stop:         false,
+			ActionScript: false,
+			DumpDir:      "",
+			PreDumpDir:   "",
+		}
+
+		// Checkpoint
+		err := c.Migrate(&criuMigrationArgs)
+		if err != nil {
+			return err
+		}
+
+		// Remove the state from the parent container; we only keep
+		// this in snapshots.
+		err2 := os.RemoveAll(c.StatePath())
+		if err2 != nil {
+			logger.Error("Failed to delete snapshot state", log.Ctx{"path": c.StatePath(), "err": err2})
+		}
+
+		if err != nil {
+			logger.Info("Failed restoring container", ctxMap)
+			return err
+		}
+
+		logger.Debug("Performed stateful restore", ctxMap)
+		logger.Info("Restored container", ctxMap)
+		return nil
+	}
+
+	EventSendLifecycle(c.project, "container-snapshot-restored",
+		fmt.Sprintf("/1.0/containers/%s", c.name), map[string]interface{}{
+			"snapshot_name": c.name,
+		})
+
+	// Restart the container
+	if wasRunning {
+		logger.Info("Restored container", ctxMap)
+		return c.Start(false)
+	}
+
+	logger.Info("Restored container", ctxMap)
+
+	return nil
+}
+
+func (c *ContainerLXC) cleanup() {
+	// Unmount any leftovers
+	c.removeUnixDevices()
+	c.removeDiskDevices()
+
+	// Remove the security profiles
+	AADeleteProfile(c)
+	SeccompDeleteProfile(c)
+
+	// Remove the devices path
+	os.Remove(c.DevicesPath())
+
+	// Remove the shmounts path
+	os.RemoveAll(c.ShmountsPath())
+}
+
+func (c *ContainerLXC) Delete() error {
+	ctxMap := log.Ctx{
+		"project":   c.project,
+		"name":      c.name,
+		"created":   c.creationDate,
+		"ephemeral": c.ephemeral,
+		"used":      c.lastUsedDate}
+
+	logger.Info("Deleting container", ctxMap)
+
+	if shared.IsTrue(c.expandedConfig["security.protection.delete"]) && !c.IsSnapshot() {
+		err := fmt.Errorf("Container is protected")
+		logger.Warn("Failed to delete container", log.Ctx{"name": c.Name(), "err": err})
+		return err
+	}
+
+	// Check if we're dealing with "lxd import"
+	isImport := false
+	if c.storage != nil {
+		_, poolName, _ := c.storage.GetContainerPoolInfo()
+
+		if c.IsSnapshot() {
+			cName, _, _ := shared.ContainerGetParentAndSnapshotName(c.name)
+			if shared.PathExists(shared.VarPath("storage-pools", poolName, "containers", cName, ".importing")) {
+				isImport = true
+			}
+		} else {
+			if shared.PathExists(shared.VarPath("storage-pools", poolName, "containers", c.name, ".importing")) {
+				isImport = true
+			}
+		}
+	}
+
+	// Attempt to initialize storage interface for the container.
+	err := c.InitStorage()
+	if err != nil {
+		logger.Warnf("Failed to init storage: %v", err)
+	}
+
+	if c.IsSnapshot() {
+		// Remove the snapshot
+		if c.storage != nil && !isImport {
+			err := c.storage.ContainerSnapshotDelete(c)
+			if err != nil {
+				logger.Warn("Failed to delete snapshot", log.Ctx{"name": c.Name(), "err": err})
+				return err
+			}
+		}
+	} else {
+		// Remove all snapshots
+		err := InstanceDeleteSnapshots(c.state, c.Project(), c.Name())
+		if err != nil {
+			logger.Warn("Failed to delete snapshots", log.Ctx{"name": c.Name(), "err": err})
+			return err
+		}
+
+		// Remove all backups
+		backups, err := c.Backups()
+		if err != nil {
+			return err
+		}
+
+		for _, backup := range backups {
+			err = backup.Delete()
+			if err != nil {
+				return err
+			}
+		}
+
+		// Clean things up
+		c.cleanup()
+
+		// Delete the container from disk
+		if c.storage != nil && !isImport {
+			_, poolName, _ := c.storage.GetContainerPoolInfo()
+			containerMountPoint := driver.GetContainerMountPoint(c.Project(), poolName, c.Name())
+			if shared.PathExists(c.Path()) ||
+				shared.PathExists(containerMountPoint) {
+				err := c.storage.ContainerDelete(c)
+				if err != nil {
+					logger.Error("Failed deleting container storage", log.Ctx{"name": c.Name(), "err": err})
+					return err
+				}
+			}
+		}
+
+		// Delete the MAAS entry
+		err = c.maasDelete()
+		if err != nil {
+			logger.Error("Failed deleting container MAAS record", log.Ctx{"name": c.Name(), "err": err})
+			return err
+		}
+
+		// Remove devices from container.
+		for k, m := range c.expandedDevices {
+			err = c.deviceRemove(k, m)
+			if err != nil && err != device.ErrUnsupportedDevType {
+				return errors.Wrapf(err, "Failed to remove device '%s'", k)
+			}
+		}
+	}
+
+	// Remove the database record
+	if err := c.state.Cluster.ContainerRemove(c.project, c.Name()); err != nil {
+		logger.Error("Failed deleting container entry", log.Ctx{"name": c.Name(), "err": err})
+		return err
+	}
+
+	// Remove the database entry for the pool device
+	if c.storage != nil {
+		// Get the name of the storage pool the container is attached to. This
+		// reverse-engineering works because container names are globally
+		// unique.
+		poolID, _, _ := c.storage.GetContainerPoolInfo()
+
+		// Remove volume from storage pool.
+		err := c.state.Cluster.StoragePoolVolumeDelete(c.Project(), c.Name(), db.StoragePoolVolumeTypeContainer, poolID)
+		if err != nil {
+			return err
+		}
+	}
+
+	logger.Info("Deleted container", ctxMap)
+
+	if c.IsSnapshot() {
+		EventSendLifecycle(c.project, "container-snapshot-deleted",
+			fmt.Sprintf("/1.0/containers/%s", c.name), map[string]interface{}{
+				"snapshot_name": c.name,
+			})
+	} else {
+		EventSendLifecycle(c.project, "container-deleted",
+			fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) Rename(newName string) error {
+	oldName := c.Name()
+	ctxMap := log.Ctx{
+		"project":   c.project,
+		"name":      c.name,
+		"created":   c.creationDate,
+		"ephemeral": c.ephemeral,
+		"used":      c.lastUsedDate,
+		"newname":   newName}
+
+	logger.Info("Renaming container", ctxMap)
+
+	// Initialize storage interface for the container.
+	err := c.InitStorage()
+	if err != nil {
+		return err
+	}
+
+	// Sanity checks
+	if !c.IsSnapshot() && !shared.ValidHostname(newName) {
+		return fmt.Errorf("Invalid container name")
+	}
+
+	if c.IsRunning() {
+		return fmt.Errorf("Renaming of running container not allowed")
+	}
+
+	// Clean things up
+	c.cleanup()
+
+	// Rename the MAAS entry
+	if !c.IsSnapshot() {
+		err = c.maasRename(newName)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Rename the logging path
+	os.RemoveAll(shared.LogPath(newName))
+	if shared.PathExists(c.LogPath()) {
+		err := os.Rename(c.LogPath(), shared.LogPath(newName))
+		if err != nil {
+			logger.Error("Failed renaming container", ctxMap)
+			return err
+		}
+	}
+
+	// Rename the storage entry
+	if c.IsSnapshot() {
+		err := c.storage.ContainerSnapshotRename(c, newName)
+		if err != nil {
+			logger.Error("Failed renaming container", ctxMap)
+			return err
+		}
+	} else {
+		err := c.storage.ContainerRename(c, newName)
+		if err != nil {
+			logger.Error("Failed renaming container", ctxMap)
+			return err
+		}
+	}
+
+	// Rename the backups
+	backups, err := c.Backups()
+	if err != nil {
+		return err
+	}
+
+	for _, backup := range backups {
+		backupName := strings.Split(backup.Name, "/")[1]
+		newBackupName := fmt.Sprintf("%s/%s", newName, backupName)
+
+		err = backup.Rename(newBackupName)
+		if err != nil {
+			return err
+		}
+	}
+
+	poolID, _, _ := c.storage.GetContainerPoolInfo()
+
+	if !c.IsSnapshot() {
+		// Rename all the snapshots
+		results, err := c.state.Cluster.ContainerGetSnapshots(c.project, oldName)
+		if err != nil {
+			logger.Error("Failed to get container snapshots", ctxMap)
+			return err
+		}
+
+		for _, sname := range results {
+			// Rename the snapshot
+			oldSnapName := strings.SplitN(sname, shared.SnapshotDelimiter, 2)[1]
+			baseSnapName := filepath.Base(sname)
+			newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName
+			err := c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+				return tx.InstanceSnapshotRename(c.project, oldName, oldSnapName, baseSnapName)
+			})
+			if err != nil {
+				logger.Error("Failed renaming snapshot", ctxMap)
+				return err
+			}
+
+			// Rename storage volume for the snapshot.
+			err = c.state.Cluster.StoragePoolVolumeRename(c.project, sname, newSnapshotName, db.StoragePoolVolumeTypeContainer, poolID)
+			if err != nil {
+				logger.Error("Failed renaming storage volume", ctxMap)
+				return err
+			}
+		}
+	}
+
+	// Rename the database entry
+	err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		if c.IsSnapshot() {
+			oldParts := strings.SplitN(oldName, shared.SnapshotDelimiter, 2)
+			newParts := strings.SplitN(newName, shared.SnapshotDelimiter, 2)
+			return tx.InstanceSnapshotRename(c.project, oldParts[0], oldParts[1], newParts[1])
+		} else {
+			return tx.InstanceRename(c.project, oldName, newName)
+		}
+	})
+	if err != nil {
+		logger.Error("Failed renaming container", ctxMap)
+		return err
+	}
+
+	// Rename storage volume for the container.
+	err = c.state.Cluster.StoragePoolVolumeRename(c.project, oldName, newName, db.StoragePoolVolumeTypeContainer, poolID)
+	if err != nil {
+		logger.Error("Failed renaming storage volume", ctxMap)
+		return err
+	}
+
+	// Set the new name in the struct
+	c.name = newName
+
+	// Update the storage volume name in the storage interface.
+	sNew := c.storage.GetStoragePoolVolumeWritable()
+	c.storage.SetStoragePoolVolumeWritable(&sNew)
+
+	// Invalidate the go-lxc cache
+	if c.c != nil {
+		c.c.Release()
+		c.c = nil
+	}
+
+	c.cConfig = false
+
+	// Update lease files
+	NetworkUpdateStatic(c.state, "")
+
+	logger.Info("Renamed container", ctxMap)
+
+	if c.IsSnapshot() {
+		EventSendLifecycle(c.project, "container-snapshot-renamed",
+			fmt.Sprintf("/1.0/containers/%s", oldName), map[string]interface{}{
+				"new_name":      newName,
+				"snapshot_name": oldName,
+			})
+	} else {
+		EventSendLifecycle(c.project, "container-renamed",
+			fmt.Sprintf("/1.0/containers/%s", oldName), map[string]interface{}{
+				"new_name": newName,
+			})
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) CGroupGet(key string) (string, error) {
+	// Load the go-lxc struct
+	err := c.InitLXC(false)
+	if err != nil {
+		return "", err
+	}
+
+	// Make sure the container is running
+	if !c.IsRunning() {
+		return "", fmt.Errorf("Can't get cgroups on a stopped container")
+	}
+
+	value := c.c.CgroupItem(key)
+	return strings.Join(value, "\n"), nil
+}
+
+func (c *ContainerLXC) CGroupSet(key string, value string) error {
+	// Load the go-lxc struct
+	err := c.InitLXC(false)
+	if err != nil {
+		return err
+	}
+
+	// Make sure the container is running
+	if !c.IsRunning() {
+		return fmt.Errorf("Can't set cgroups on a stopped container")
+	}
+
+	err = c.c.SetCgroupItem(key, value)
+	if err != nil {
+		return fmt.Errorf("Failed to set cgroup %s=\"%s\": %s", key, value, err)
+	}
+
+	return nil
+}
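+
+// Usage sketch (hypothetical values, not part of this patch): the live-update
+// path in Update drives cgroup limits through this pair, e.g.
+//
+//	if err := c.CGroupSet("pids.max", "100"); err != nil {
+//		return err
+//	}
+//	val, _ := c.CGroupGet("pids.max")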
+
+func (c *ContainerLXC) VolatileSet(changes map[string]string) error {
+	// Sanity check
+	for key := range changes {
+		if !strings.HasPrefix(key, "volatile.") {
+			return fmt.Errorf("Only volatile keys can be modified with VolatileSet")
+		}
+	}
+
+	// Update the database
+	var err error
+	if c.IsSnapshot() {
+		err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.InstanceSnapshotConfigUpdate(c.id, changes)
+		})
+	} else {
+		err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.ContainerConfigUpdate(c.id, changes)
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "Failed to volatile config")
+	}
+
+	// Apply the change locally
+	for key, value := range changes {
+		if value == "" {
+			delete(c.expandedConfig, key)
+			delete(c.localConfig, key)
+			continue
+		}
+
+		c.expandedConfig[key] = value
+		c.localConfig[key] = value
+	}
+
+	return nil
+}
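+
+// Usage sketch: TemplateApply below stores its deferred trigger this way:
+//
+//	c.VolatileSet(map[string]string{"volatile.apply_template": trigger})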
+
+func (c *ContainerLXC) Update(args db.ContainerArgs, userRequested bool) error {
+	// Set sane defaults for unset keys
+	if args.Project == "" {
+		args.Project = "default"
+	}
+
+	if args.Architecture == 0 {
+		args.Architecture = c.architecture
+	}
+
+	if args.Config == nil {
+		args.Config = map[string]string{}
+	}
+
+	if args.Devices == nil {
+		args.Devices = config.Devices{}
+	}
+
+	if args.Profiles == nil {
+		args.Profiles = []string{}
+	}
+
+	// Validate the new config
+	err := ContainerValidConfig(c.state.OS, args.Config, false, false)
+	if err != nil {
+		return errors.Wrap(err, "Invalid config")
+	}
+
+	// Validate the new devices without using expanded devices validation (expensive checks disabled).
+	err = ContainerValidDevices(c.state, c.state.Cluster, c.Name(), args.Devices, false)
+	if err != nil {
+		return errors.Wrap(err, "Invalid devices")
+	}
+
+	// Validate the new profiles
+	profiles, err := c.state.Cluster.Profiles(args.Project)
+	if err != nil {
+		return errors.Wrap(err, "Failed to get profiles")
+	}
+
+	checkedProfiles := []string{}
+	for _, profile := range args.Profiles {
+		if !shared.StringInSlice(profile, profiles) {
+			return fmt.Errorf("Requested profile '%s' doesn't exist", profile)
+		}
+
+		if shared.StringInSlice(profile, checkedProfiles) {
+			return fmt.Errorf("Duplicate profile found in request")
+		}
+
+		checkedProfiles = append(checkedProfiles, profile)
+	}
+
+	// Validate the new architecture
+	if args.Architecture != 0 {
+		_, err = osarch.ArchitectureName(args.Architecture)
+		if err != nil {
+			return fmt.Errorf("Invalid architecture id: %s", err)
+		}
+	}
+
+	// Check that volatile and image keys weren't modified
+	if userRequested {
+		for k, v := range args.Config {
+			if strings.HasPrefix(k, "volatile.") && c.localConfig[k] != v {
+				return fmt.Errorf("Volatile keys are read-only")
+			}
+
+			if strings.HasPrefix(k, "image.") && c.localConfig[k] != v {
+				return fmt.Errorf("Image keys are read-only")
+			}
+		}
+
+		for k, v := range c.localConfig {
+			if strings.HasPrefix(k, "volatile.") && args.Config[k] != v {
+				return fmt.Errorf("Volatile keys are read-only")
+			}
+
+			if strings.HasPrefix(k, "image.") && args.Config[k] != v {
+				return fmt.Errorf("Image keys are read-only")
+			}
+		}
+	}
+
+	// Get a copy of the old configuration
+	oldDescription := c.Description()
+	oldArchitecture := 0
+	err = shared.DeepCopy(&c.architecture, &oldArchitecture)
+	if err != nil {
+		return err
+	}
+
+	oldEphemeral := false
+	err = shared.DeepCopy(&c.ephemeral, &oldEphemeral)
+	if err != nil {
+		return err
+	}
+
+	oldExpandedDevices := config.Devices{}
+	err = shared.DeepCopy(&c.expandedDevices, &oldExpandedDevices)
+	if err != nil {
+		return err
+	}
+
+	oldExpandedConfig := map[string]string{}
+	err = shared.DeepCopy(&c.expandedConfig, &oldExpandedConfig)
+	if err != nil {
+		return err
+	}
+
+	oldLocalDevices := config.Devices{}
+	err = shared.DeepCopy(&c.localDevices, &oldLocalDevices)
+	if err != nil {
+		return err
+	}
+
+	oldLocalConfig := map[string]string{}
+	err = shared.DeepCopy(&c.localConfig, &oldLocalConfig)
+	if err != nil {
+		return err
+	}
+
+	oldProfiles := []string{}
+	err = shared.DeepCopy(&c.profiles, &oldProfiles)
+	if err != nil {
+		return err
+	}
+
+	oldExpiryDate := c.expiryDate
+
+	// Define a function which reverts everything.  Defer this function
+	// so that it doesn't need to be explicitly called in every failing
+	// return path.  Track whether or not we want to undo the changes
+	// using a closure.
+	undoChanges := true
+	defer func() {
+		if undoChanges {
+			c.description = oldDescription
+			c.architecture = oldArchitecture
+			c.ephemeral = oldEphemeral
+			c.expandedConfig = oldExpandedConfig
+			c.expandedDevices = oldExpandedDevices
+			c.localConfig = oldLocalConfig
+			c.localDevices = oldLocalDevices
+			c.profiles = oldProfiles
+			c.expiryDate = oldExpiryDate
+			if c.c != nil {
+				c.c.Release()
+				c.c = nil
+			}
+			c.cConfig = false
+			c.InitLXC(true)
+			device.TaskSchedulerTrigger("container", c.name, "changed")
+		}
+	}()
+
+	// Apply the various changes
+	c.description = args.Description
+	c.architecture = args.Architecture
+	c.ephemeral = args.Ephemeral
+	c.localConfig = args.Config
+	c.localDevices = args.Devices
+	c.profiles = args.Profiles
+	c.expiryDate = args.ExpiryDate
+
+	// Expand the config and refresh the LXC config
+	err = c.ExpandConfig(nil)
+	if err != nil {
+		return errors.Wrap(err, "Expand config")
+	}
+
+	err = c.ExpandDevices(nil)
+	if err != nil {
+		return errors.Wrap(err, "Expand devices")
+	}
+
+	// Diff the configurations
+	changedConfig := []string{}
+	for key := range oldExpandedConfig {
+		if oldExpandedConfig[key] != c.expandedConfig[key] {
+			if !shared.StringInSlice(key, changedConfig) {
+				changedConfig = append(changedConfig, key)
+			}
+		}
+	}
+
+	for key := range c.expandedConfig {
+		if oldExpandedConfig[key] != c.expandedConfig[key] {
+			if !shared.StringInSlice(key, changedConfig) {
+				changedConfig = append(changedConfig, key)
+			}
+		}
+	}
+
+	// Diff the devices
+	removeDevices, addDevices, updateDevices, updateDiff := oldExpandedDevices.Update(c.expandedDevices, func(oldDevice config.Device, newDevice config.Device) []string {
+		// This function needs to return a list of fields that are excluded from differences
+		// between oldDevice and newDevice. The result is that as long as the devices are
+		// otherwise identical except for the fields returned here, the device is
+		// considered to be "updated" rather than "added & removed".
+		if oldDevice["type"] != newDevice["type"] || oldDevice["nictype"] != newDevice["nictype"] {
+			return []string{} // Device types aren't the same, so this cannot be an update.
+		}
+
+		d, err := device.New(c, c.state, "", newDevice, nil, nil)
+		if err != nil {
+			return []string{} // Couldn't create Device, so this cannot be an update.
+		}
+
+		_, updateFields := d.CanHotPlug()
+		return updateFields
+	})
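+
+	// Worked example (hypothetical devices, not part of this patch): if an
+	// old and new device differ only in fields reported by CanHotPlug(),
+	// the device lands in updateDevices; if "type" or "nictype" differ,
+	// nothing is excluded and it appears in removeDevices and addDevices
+	// instead.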
+
+	// Do some validation of the config diff
+	err = ContainerValidConfig(c.state.OS, c.expandedConfig, false, true)
+	if err != nil {
+		return errors.Wrap(err, "Invalid expanded config")
+	}
+
+	// Do full expanded validation of the devices diff.
+	err = ContainerValidDevices(c.state, c.state.Cluster, c.Name(), c.expandedDevices, true)
+	if err != nil {
+		return errors.Wrap(err, "Invalid expanded devices")
+	}
+
+	// Run through initLXC to catch anything we missed
+	if c.c != nil {
+		c.c.Release()
+		c.c = nil
+	}
+	c.cConfig = false
+	err = c.InitLXC(true)
+	if err != nil {
+		return errors.Wrap(err, "Initialize LXC")
+	}
+
+	// Initialize storage interface for the container.
+	err = c.InitStorage()
+	if err != nil {
+		return errors.Wrap(err, "Initialize storage")
+	}
+
+	// If apparmor changed, re-validate the apparmor profile
+	if shared.StringInSlice("raw.apparmor", changedConfig) || shared.StringInSlice("security.nesting", changedConfig) {
+		err = AAParseProfile(c)
+		if err != nil {
+			return errors.Wrap(err, "Parse AppArmor profile")
+		}
+	}
+
+	if shared.StringInSlice("security.idmap.isolated", changedConfig) || shared.StringInSlice("security.idmap.base", changedConfig) || shared.StringInSlice("security.idmap.size", changedConfig) || shared.StringInSlice("raw.idmap", changedConfig) || shared.StringInSlice("security.privileged", changedConfig) {
+		var idmap *idmap.IdmapSet
+		base := int64(0)
+		if !c.IsPrivileged() {
+			// Update the idmap
+			idmap, base, err = findIdmap(
+				c.state,
+				c.Name(),
+				c.expandedConfig["security.idmap.isolated"],
+				c.expandedConfig["security.idmap.base"],
+				c.expandedConfig["security.idmap.size"],
+				c.expandedConfig["raw.idmap"],
+			)
+			if err != nil {
+				return errors.Wrap(err, "Failed to get ID map")
+			}
+		}
+
+		var jsonIdmap string
+		if idmap != nil {
+			idmapBytes, err := json.Marshal(idmap.Idmap)
+			if err != nil {
+				return err
+			}
+			jsonIdmap = string(idmapBytes)
+		} else {
+			jsonIdmap = "[]"
+		}
+		c.localConfig["volatile.idmap.next"] = jsonIdmap
+		c.localConfig["volatile.idmap.base"] = fmt.Sprintf("%v", base)
+
+		// Invalidate the idmap cache
+		c.idmapset = nil
+	}
+
+	// Update MAAS
+	updateMAAS := false
+	for _, key := range []string{"maas.subnet.ipv4", "maas.subnet.ipv6", "ipv4.address", "ipv6.address"} {
+		if shared.StringInSlice(key, updateDiff) {
+			updateMAAS = true
+			break
+		}
+	}
+
+	if !c.IsSnapshot() && updateMAAS {
+		err = c.maasUpdate(oldExpandedDevices.CloneNative())
+		if err != nil {
+			return err
+		}
+	}
+
+	// Use the device interface to apply update changes.
+	err = c.updateDevices(removeDevices, addDevices, updateDevices, oldExpandedDevices)
+	if err != nil {
+		return err
+	}
+
+	// Apply the live changes
+	isRunning := c.IsRunning()
+	if isRunning {
+		// Live update the container config
+		for _, key := range changedConfig {
+			value := c.expandedConfig[key]
+
+			if key == "raw.apparmor" || key == "security.nesting" {
+				// Update the AppArmor profile
+				err = AALoadProfile(c)
+				if err != nil {
+					return err
+				}
+			} else if key == "security.devlxd" {
+				if value == "" || shared.IsTrue(value) {
+					err = c.insertMount(shared.VarPath("devlxd"), "/dev/lxd", "none", unix.MS_BIND, false)
+					if err != nil {
+						return err
+					}
+				} else if c.FileExists("/dev/lxd") == nil {
+					err = c.removeMount("/dev/lxd")
+					if err != nil {
+						return err
+					}
+
+					err = c.FileRemove("/dev/lxd")
+					if err != nil {
+						return err
+					}
+				}
+			} else if key == "linux.kernel_modules" && value != "" {
+				for _, module := range strings.Split(value, ",") {
+					module = strings.TrimPrefix(module, " ")
+					err := util.LoadModule(module)
+					if err != nil {
+						return fmt.Errorf("Failed to load kernel module '%s': %s", module, err)
+					}
+				}
+			} else if key == "limits.disk.priority" {
+				if !c.state.OS.CGroupBlkioController {
+					continue
+				}
+
+				priorityInt := 5
+				diskPriority := c.expandedConfig["limits.disk.priority"]
+				if diskPriority != "" {
+					priorityInt, err = strconv.Atoi(diskPriority)
+					if err != nil {
+						return err
+					}
+				}
+
+				// Minimum valid value is 10
+				priority := priorityInt * 100
+				if priority == 0 {
+					priority = 10
+				}
+
+				err = c.CGroupSet("blkio.weight", fmt.Sprintf("%d", priority))
+				if err != nil {
+					return err
+				}
+			} else if key == "limits.memory" || strings.HasPrefix(key, "limits.memory.") {
+				// Skip if no memory CGroup
+				if !c.state.OS.CGroupMemoryController {
+					continue
+				}
+
+				// Set the new memory limit
+				memory := c.expandedConfig["limits.memory"]
+				memoryEnforce := c.expandedConfig["limits.memory.enforce"]
+				memorySwap := c.expandedConfig["limits.memory.swap"]
+
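+				// Worked example of the parsing below: on a host with 4GiB
+				// of RAM, limits.memory "50%" becomes
+				// (4294967296/100)*50 = 2147483600 bytes (integer division
+				// truncates), while "1GB" is parsed by ParseByteSizeString.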
+				// Parse memory
+				if memory == "" {
+					memory = "-1"
+				} else if strings.HasSuffix(memory, "%") {
+					percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64)
+					if err != nil {
+						return err
+					}
+
+					memoryTotal, err := shared.DeviceTotalMemory()
+					if err != nil {
+						return err
+					}
+
+					memory = fmt.Sprintf("%d", int64((memoryTotal/100)*percent))
+				} else {
+					valueInt, err := units.ParseByteSizeString(memory)
+					if err != nil {
+						return err
+					}
+					memory = fmt.Sprintf("%d", valueInt)
+				}
+
+				// Store the old values for revert
+				oldMemswLimit := ""
+				if c.state.OS.CGroupSwapAccounting {
+					oldMemswLimit, err = c.CGroupGet("memory.memsw.limit_in_bytes")
+					if err != nil {
+						oldMemswLimit = ""
+					}
+				}
+
+				oldLimit, err := c.CGroupGet("memory.limit_in_bytes")
+				if err != nil {
+					oldLimit = ""
+				}
+
+				oldSoftLimit, err := c.CGroupGet("memory.soft_limit_in_bytes")
+				if err != nil {
+					oldSoftLimit = ""
+				}
+
+				revertMemory := func() {
+					if oldSoftLimit != "" {
+						c.CGroupSet("memory.soft_limit_in_bytes", oldSoftLimit)
+					}
+
+					if oldLimit != "" {
+						c.CGroupSet("memory.limit_in_bytes", oldLimit)
+					}
+
+					if oldMemswLimit != "" {
+						c.CGroupSet("memory.memsw.limit_in_bytes", oldMemswLimit)
+					}
+				}
+
+				// Reset everything
+				if c.state.OS.CGroupSwapAccounting {
+					err = c.CGroupSet("memory.memsw.limit_in_bytes", "-1")
+					if err != nil {
+						revertMemory()
+						return err
+					}
+				}
+
+				err = c.CGroupSet("memory.limit_in_bytes", "-1")
+				if err != nil {
+					revertMemory()
+					return err
+				}
+
+				err = c.CGroupSet("memory.soft_limit_in_bytes", "-1")
+				if err != nil {
+					revertMemory()
+					return err
+				}
+
+				// Set the new values
+				if memoryEnforce == "soft" {
+					// Set new limit
+					err = c.CGroupSet("memory.soft_limit_in_bytes", memory)
+					if err != nil {
+						revertMemory()
+						return err
+					}
+				} else {
+					if c.state.OS.CGroupSwapAccounting && (memorySwap == "" || shared.IsTrue(memorySwap)) {
+						err = c.CGroupSet("memory.limit_in_bytes", memory)
+						if err != nil {
+							revertMemory()
+							return err
+						}
+
+						err = c.CGroupSet("memory.memsw.limit_in_bytes", memory)
+						if err != nil {
+							revertMemory()
+							return err
+						}
+					} else {
+						err = c.CGroupSet("memory.limit_in_bytes", memory)
+						if err != nil {
+							revertMemory()
+							return err
+						}
+					}
+
+					// Set soft limit to value 10% less than hard limit
+					valueInt, err := strconv.ParseInt(memory, 10, 64)
+					if err != nil {
+						revertMemory()
+						return err
+					}
+
+					err = c.CGroupSet("memory.soft_limit_in_bytes", fmt.Sprintf("%.0f", float64(valueInt)*0.9))
+					if err != nil {
+						revertMemory()
+						return err
+					}
+				}
+
+				// Configure the swappiness
+				if key == "limits.memory.swap" || key == "limits.memory.swap.priority" {
+					memorySwap := c.expandedConfig["limits.memory.swap"]
+					memorySwapPriority := c.expandedConfig["limits.memory.swap.priority"]
+					if memorySwap != "" && !shared.IsTrue(memorySwap) {
+						err = c.CGroupSet("memory.swappiness", "0")
+						if err != nil {
+							return err
+						}
+					} else {
+						priority := 0
+						if memorySwapPriority != "" {
+							priority, err = strconv.Atoi(memorySwapPriority)
+							if err != nil {
+								return err
+							}
+						}
+
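+						// 60-10+priority maps the default priority 0 to a
+						// swappiness of 50 and priority 10 to the kernel
+						// default of 60.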
+						err = c.CGroupSet("memory.swappiness", fmt.Sprintf("%d", 60-10+priority))
+						if err != nil {
+							return err
+						}
+					}
+				}
+			} else if key == "limits.network.priority" {
+				err := c.setNetworkPriority()
+				if err != nil {
+					return err
+				}
+			} else if key == "limits.cpu" {
+				// Trigger a scheduler re-run
+				device.TaskSchedulerTrigger("container", c.name, "changed")
+			} else if key == "limits.cpu.priority" || key == "limits.cpu.allowance" {
+				// Skip if no cpu CGroup
+				if !c.state.OS.CGroupCPUController {
+					continue
+				}
+
+				// Apply new CPU limits
+				cpuShares, cpuCfsQuota, cpuCfsPeriod, err := device.ParseCPU(c.expandedConfig["limits.cpu.allowance"], c.expandedConfig["limits.cpu.priority"])
+				if err != nil {
+					return err
+				}
+
+				err = c.CGroupSet("cpu.shares", cpuShares)
+				if err != nil {
+					return err
+				}
+
+				err = c.CGroupSet("cpu.cfs_period_us", cpuCfsPeriod)
+				if err != nil {
+					return err
+				}
+
+				err = c.CGroupSet("cpu.cfs_quota_us", cpuCfsQuota)
+				if err != nil {
+					return err
+				}
+			} else if key == "limits.processes" {
+				if !c.state.OS.CGroupPidsController {
+					continue
+				}
+
+				if value == "" {
+					err = c.CGroupSet("pids.max", "max")
+					if err != nil {
+						return err
+					}
+				} else {
+					valueInt, err := strconv.ParseInt(value, 10, 64)
+					if err != nil {
+						return err
+					}
+
+					err = c.CGroupSet("pids.max", fmt.Sprintf("%d", valueInt))
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+
+	// Finally, apply the changes to the database
+	err = query.Retry(func() error {
+		tx, err := c.state.Cluster.Begin()
+		if err != nil {
+			return err
+		}
+
+		// Snapshots should update only their descriptions and expiry date.
+		if c.IsSnapshot() {
+			err = db.InstanceSnapshotUpdate(tx, c.id, c.description, c.expiryDate)
+			if err != nil {
+				tx.Rollback()
+				return errors.Wrap(err, "Snapshot update")
+			}
+		} else {
+			err = db.ContainerConfigClear(tx, c.id)
+			if err != nil {
+				tx.Rollback()
+				return err
+			}
+			err = db.ContainerConfigInsert(tx, c.id, c.localConfig)
+			if err != nil {
+				tx.Rollback()
+				return errors.Wrap(err, "Config insert")
+			}
+
+			err = db.ContainerProfilesInsert(tx, c.id, c.project, c.profiles)
+			if err != nil {
+				tx.Rollback()
+				return errors.Wrap(err, "Profiles insert")
+			}
+
+			err = db.DevicesAdd(tx, "instance", int64(c.id), c.localDevices)
+			if err != nil {
+				tx.Rollback()
+				return errors.Wrap(err, "Device add")
+			}
+
+			err = db.ContainerUpdate(tx, c.id, c.description, c.architecture, c.ephemeral, c.expiryDate)
+			if err != nil {
+				tx.Rollback()
+				return errors.Wrap(err, "Container update")
+			}
+		}
+
+		if err := db.TxCommit(tx); err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		return errors.Wrap(err, "Failed to update database")
+	}
+
+	/* we can call Update in some cases when the directory doesn't exist
+	 * yet before container creation; this is okay, because at the end of
+	 * container creation we write the backup file, so let's not worry about
+	 * ENOENT. */
+	if c.storage.ContainerStorageReady(c) {
+		err := WriteBackupFile(c)
+		if err != nil && !os.IsNotExist(err) {
+			return errors.Wrap(err, "Failed to write backup file")
+		}
+	}
+
+	// Send devlxd notifications
+	if isRunning {
+		// Config changes (only for user.* keys)
+		for _, key := range changedConfig {
+			if !strings.HasPrefix(key, "user.") {
+				continue
+			}
+
+			msg := map[string]string{
+				"key":       key,
+				"old_value": oldExpandedConfig[key],
+				"value":     c.expandedConfig[key],
+			}
+
+			err = DevLXDEventSend(c, "config", msg)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Device changes
+		for k, m := range removeDevices {
+			msg := map[string]interface{}{
+				"action": "removed",
+				"name":   k,
+				"config": m,
+			}
+
+			err = DevLXDEventSend(c, "device", msg)
+			if err != nil {
+				return err
+			}
+		}
+
+		for k, m := range updateDevices {
+			msg := map[string]interface{}{
+				"action": "updated",
+				"name":   k,
+				"config": m,
+			}
+
+			err = DevLXDEventSend(c, "device", msg)
+			if err != nil {
+				return err
+			}
+		}
+
+		for k, m := range addDevices {
+			msg := map[string]interface{}{
+				"action": "added",
+				"name":   k,
+				"config": m,
+			}
+
+			err = DevLXDEventSend(c, "device", msg)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Success, update the closure to mark that the changes should be kept.
+	undoChanges = false
+
+	var endpoint string
+
+	if c.IsSnapshot() {
+		cName, sName, _ := shared.ContainerGetParentAndSnapshotName(c.name)
+		endpoint = fmt.Sprintf("/1.0/containers/%s/snapshots/%s", cName, sName)
+	} else {
+		endpoint = fmt.Sprintf("/1.0/containers/%s", c.name)
+	}
+
+	EventSendLifecycle(c.project, "container-updated", endpoint, nil)
+
+	return nil
+}
+
+func (c *ContainerLXC) updateDevices(removeDevices config.Devices, addDevices config.Devices, updateDevices config.Devices, oldExpandedDevices config.Devices) error {
+	isRunning := c.IsRunning()
+
+	// Remove devices in reverse order to how they were added.
+	for _, dev := range removeDevices.Reversed() {
+		if isRunning {
+			err := c.deviceStop(dev.Name, dev.Config, "")
+			if err == device.ErrUnsupportedDevType {
+				continue // No point in trying to remove device below.
+			} else if err != nil {
+				return errors.Wrapf(err, "Failed to stop device '%s'", dev.Name)
+			}
+		}
+
+		err := c.deviceRemove(dev.Name, dev.Config)
+		if err != nil && err != device.ErrUnsupportedDevType {
+			return errors.Wrapf(err, "Failed to remove device '%s'", dev.Name)
+		}
+
+		// Check whether we are about to add the same device back with updated config and
+		// if not, or if the device type has changed, then remove all volatile keys for
+		// this device (as it's an actual removal or a device type change).
+		err = c.deviceResetVolatile(dev.Name, dev.Config, addDevices[dev.Name])
+		if err != nil {
+			return errors.Wrapf(err, "Failed to reset volatile data for device '%s'", dev.Name)
+		}
+	}
+
+	// Add devices in sorted order, this ensures that device mounts are added in path order.
+	for _, dev := range addDevices.Sorted() {
+		err := c.deviceAdd(dev.Name, dev.Config)
+		if err == device.ErrUnsupportedDevType {
+			continue // No point in trying to start device below.
+		} else if err != nil {
+			return errors.Wrapf(err, "Failed to add device '%s'", dev.Name)
+		}
+
+		if isRunning {
+			_, err := c.deviceStart(dev.Name, dev.Config, isRunning)
+			if err != nil && err != device.ErrUnsupportedDevType {
+				return errors.Wrapf(err, "Failed to start device '%s'", dev.Name)
+			}
+		}
+	}
+
+	for _, dev := range updateDevices.Sorted() {
+		err := c.deviceUpdate(dev.Name, dev.Config, oldExpandedDevices, isRunning)
+		if err != nil && err != device.ErrUnsupportedDevType {
+			return errors.Wrapf(err, "Failed to update device '%s'", dev.Name)
+		}
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) Export(w io.Writer, properties map[string]string) error {
+	ctxMap := log.Ctx{
+		"project":   c.project,
+		"name":      c.name,
+		"created":   c.creationDate,
+		"ephemeral": c.ephemeral,
+		"used":      c.lastUsedDate}
+
+	if c.IsRunning() {
+		return fmt.Errorf("Cannot export a running container as an image")
+	}
+
+	logger.Info("Exporting container", ctxMap)
+
+	// Start the storage
+	ourStart, err := c.StorageStart()
+	if err != nil {
+		logger.Error("Failed exporting container", ctxMap)
+		return err
+	}
+	if ourStart {
+		defer c.StorageStop()
+	}
+
+	// Unshift the container
+	idmap, err := c.DiskIdmap()
+	if err != nil {
+		logger.Error("Failed exporting container", ctxMap)
+		return err
+	}
+
+	if idmap != nil {
+		if !c.IsSnapshot() && shared.IsTrue(c.expandedConfig["security.protection.shift"]) {
+			return fmt.Errorf("Container is protected against filesystem shifting")
+		}
+
+		var err error
+
+		if c.Storage().GetStorageType() == StorageTypeZfs {
+			err = idmap.UnshiftRootfs(c.RootfsPath(), storage.ZFSIdmapSetSkipper)
+		} else if c.Storage().GetStorageType() == StorageTypeBtrfs {
+			err = UnshiftBtrfsRootfs(c.RootfsPath(), idmap)
+		} else {
+			err = idmap.UnshiftRootfs(c.RootfsPath(), nil)
+		}
+		if err != nil {
+			logger.Error("Failed exporting container", ctxMap)
+			return err
+		}
+
+		if c.Storage().GetStorageType() == StorageTypeZfs {
+			defer idmap.ShiftRootfs(c.RootfsPath(), storage.ZFSIdmapSetSkipper)
+		} else if c.Storage().GetStorageType() == StorageTypeBtrfs {
+			defer ShiftBtrfsRootfs(c.RootfsPath(), idmap)
+		} else {
+			defer idmap.ShiftRootfs(c.RootfsPath(), nil)
+		}
+	}
+
+	// Create the tarball
+	ctw := containerwriter.NewContainerTarWriter(w, idmap)
+
+	// Keep track of the first path we saw for each path with nlink>1
+	cDir := c.Path()
+
+	// Path inside the tar image is the pathname starting after cDir
+	offset := len(cDir) + 1
+
+	writeToTar := func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		err = ctw.WriteFile(offset, path, fi)
+		if err != nil {
+			logger.Debugf("Error tarring up %s: %s", path, err)
+			return err
+		}
+		return nil
+	}
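+
+	// Worked example (hypothetical path, not part of this patch): with cDir
+	// "/var/lib/lxd/containers/c1", offset strips the directory prefix plus
+	// its trailing slash, so "/var/lib/lxd/containers/c1/rootfs/etc/hosts"
+	// is archived as "rootfs/etc/hosts".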
+
+	// Look for metadata.yaml
+	fnam := filepath.Join(cDir, "metadata.yaml")
+	if !shared.PathExists(fnam) {
+		// Generate a new metadata.yaml
+		tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
+		if err != nil {
+			ctw.Close()
+			logger.Error("Failed exporting container", ctxMap)
+			return err
+		}
+		defer os.RemoveAll(tempDir)
+
+		// Get the container's architecture
+		var arch string
+		if c.IsSnapshot() {
+			parentName, _, _ := shared.ContainerGetParentAndSnapshotName(c.name)
+			parent, err := InstanceLoadByProjectAndName(c.state, c.project, parentName)
+			if err != nil {
+				ctw.Close()
+				logger.Error("Failed exporting container", ctxMap)
+				return err
+			}
+
+			arch, _ = osarch.ArchitectureName(parent.Architecture())
+		} else {
+			arch, _ = osarch.ArchitectureName(c.architecture)
+		}
+
+		if arch == "" {
+			arch, err = osarch.ArchitectureName(c.state.OS.Architectures[0])
+			if err != nil {
+				logger.Error("Failed exporting container", ctxMap)
+				return err
+			}
+		}
+
+		// Fill in the metadata
+		meta := api.ImageMetadata{}
+		meta.Architecture = arch
+		meta.CreationDate = time.Now().UTC().Unix()
+		meta.Properties = properties
+
+		data, err := yaml.Marshal(&meta)
+		if err != nil {
+			ctw.Close()
+			logger.Error("Failed exporting container", ctxMap)
+			return err
+		}
+
+		// Write the actual file
+		fnam = filepath.Join(tempDir, "metadata.yaml")
+		err = ioutil.WriteFile(fnam, data, 0644)
+		if err != nil {
+			ctw.Close()
+			logger.Error("Failed exporting container", ctxMap)
+			return err
+		}
+
+		fi, err := os.Lstat(fnam)
+		if err != nil {
+			ctw.Close()
+			logger.Error("Failed exporting container", ctxMap)
+			return err
+		}
+
+		tmpOffset := len(path.Dir(fnam)) + 1
+		if err := ctw.WriteFile(tmpOffset, fnam, fi); err != nil {
+			ctw.Close()
+			logger.Debugf("Error writing to tarfile: %s", err)
+			logger.Error("Failed exporting container", ctxMap)
+			return err
+		}
+	} else {
+		if properties != nil {
+			// Parse the metadata
+			content, err := ioutil.ReadFile(fnam)
+			if err != nil {
+				ctw.Close()
+				logger.Error("Failed exporting container", ctxMap)
+				return err
+			}
+
+			metadata := new(api.ImageMetadata)
+			err = yaml.Unmarshal(content, &metadata)
+			if err != nil {
+				ctw.Close()
+				logger.Error("Failed exporting container", ctxMap)
+				return err
+			}
+			metadata.Properties = properties
+
+			// Generate a new metadata.yaml
+			tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
+			if err != nil {
+				ctw.Close()
+				logger.Error("Failed exporting container", ctxMap)
+				return err
+			}
+			defer os.RemoveAll(tempDir)
+
+			data, err := yaml.Marshal(&metadata)
+			if err != nil {
+				ctw.Close()
+				logger.Error("Failed exporting container", ctxMap)
+				return err
+			}
+
+			// Write the actual file
+			fnam = filepath.Join(tempDir, "metadata.yaml")
+			err = ioutil.WriteFile(fnam, data, 0644)
+			if err != nil {
+				ctw.Close()
+				logger.Error("Failed exporting container", ctxMap)
+				return err
+			}
+		}
+
+		// Include metadata.yaml in the tarball
+		fi, err := os.Lstat(fnam)
+		if err != nil {
+			ctw.Close()
+			logger.Debugf("Error statting %s during export", fnam)
+			logger.Error("Failed exporting container", ctxMap)
+			return err
+		}
+
+		if properties != nil {
+			tmpOffset := len(path.Dir(fnam)) + 1
+			err = ctw.WriteFile(tmpOffset, fnam, fi)
+		} else {
+			err = ctw.WriteFile(offset, fnam, fi)
+		}
+		if err != nil {
+			ctw.Close()
+			logger.Debugf("Error writing to tarfile: %s", err)
+			logger.Error("Failed exporting container", ctxMap)
+			return err
+		}
+	}
+
+	// Include all the rootfs files
+	fnam = c.RootfsPath()
+	err = filepath.Walk(fnam, writeToTar)
+	if err != nil {
+		logger.Error("Failed exporting container", ctxMap)
+		return err
+	}
+
+	// Include all the templates
+	fnam = c.TemplatesPath()
+	if shared.PathExists(fnam) {
+		err = filepath.Walk(fnam, writeToTar)
+		if err != nil {
+			logger.Error("Failed exporting container", ctxMap)
+			return err
+		}
+	}
+
+	err = ctw.Close()
+	if err != nil {
+		logger.Error("Failed exporting container", ctxMap)
+		return err
+	}
+
+	logger.Info("Exported container", ctxMap)
+	return nil
+}
+
+func collectCRIULogFile(c Instance, imagesDir string, function string, method string) error {
+	t := time.Now().Format(time.RFC3339)
+	newPath := shared.LogPath(c.Name(), fmt.Sprintf("%s_%s_%s.log", function, method, t))
+	return shared.FileCopy(filepath.Join(imagesDir, fmt.Sprintf("%s.log", method)), newPath)
+}
+
+func getCRIULogErrors(imagesDir string, method string) (string, error) {
+	f, err := os.Open(path.Join(imagesDir, fmt.Sprintf("%s.log", method)))
+	if err != nil {
+		return "", err
+	}
+
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	ret := []string{}
+	for scanner.Scan() {
+		line := scanner.Text()
+		if strings.Contains(line, "Error") || strings.Contains(line, "Warn") {
+			ret = append(ret, scanner.Text())
+		}
+	}
+
+	return strings.Join(ret, "\n"), nil
+}
+
+type CriuMigrationArgs struct {
+	Cmd          uint
+	StateDir     string
+	Function     string
+	Stop         bool
+	ActionScript bool
+	DumpDir      string
+	PreDumpDir   string
+	Features     lxc.CriuFeatures
+}
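+
+// Construction sketch: Restore above performs a stateful snapshot restore
+// with the unused fields left at their zero values:
+//
+//	criuMigrationArgs := CriuMigrationArgs{
+//		Cmd:      lxc.MIGRATE_RESTORE,
+//		StateDir: c.StatePath(),
+//		Function: "snapshot",
+//	}
+//	err := c.Migrate(&criuMigrationArgs)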
+
+func (c *ContainerLXC) Migrate(args *CriuMigrationArgs) error {
+	ctxMap := log.Ctx{
+		"project":      c.project,
+		"name":         c.name,
+		"created":      c.creationDate,
+		"ephemeral":    c.ephemeral,
+		"used":         c.lastUsedDate,
+		"statedir":     args.StateDir,
+		"actionscript": args.ActionScript,
+		"predumpdir":   args.PreDumpDir,
+		"features":     args.Features,
+		"stop":         args.Stop}
+
+	_, err := exec.LookPath("criu")
+	if err != nil {
+		return fmt.Errorf("Unable to perform container live migration. CRIU isn't installed")
+	}
+
+	logger.Info("Migrating container", ctxMap)
+
+	// Initialize storage interface for the container.
+	err = c.InitStorage()
+	if err != nil {
+		return err
+	}
+
+	prettyCmd := ""
+	switch args.Cmd {
+	case lxc.MIGRATE_PRE_DUMP:
+		prettyCmd = "pre-dump"
+	case lxc.MIGRATE_DUMP:
+		prettyCmd = "dump"
+	case lxc.MIGRATE_RESTORE:
+		prettyCmd = "restore"
+	case lxc.MIGRATE_FEATURE_CHECK:
+		prettyCmd = "feature-check"
+	default:
+		prettyCmd = "unknown"
+		logger.Warn("Unknown migrate call", log.Ctx{"cmd": args.Cmd})
+	}
+
+	preservesInodes := c.storage.PreservesInodes()
+	/* This feature was only added in 2.0.1, let's not ask for it
+	 * before then or migrations will fail.
+	 */
+	if !util.RuntimeLiblxcVersionAtLeast(2, 0, 1) {
+		preservesInodes = false
+	}
+
+	finalStateDir := args.StateDir
+	var migrateErr error
+
+	/* For restore, we need an extra fork so that we daemonize monitor
+	 * instead of having it be a child of LXD, so let's hijack the command
+	 * here and do the extra fork.
+	 */
+	if args.Cmd == lxc.MIGRATE_RESTORE {
+		// Run the shared start
+		_, postStartHooks, err := c.startCommon()
+		if err != nil {
+			return err
+		}
+
+		/*
+		 * For unprivileged containers we need to shift the
+		 * perms on the images so that they can be
+		 * opened by the process after it is in its user
+		 * namespace.
+		 */
+		idmapset, err := c.CurrentIdmap()
+		if err != nil {
+			return err
+		}
+
+		if idmapset != nil {
+			ourStart, err := c.StorageStart()
+			if err != nil {
+				return err
+			}
+
+			if c.Storage().GetStorageType() == StorageTypeZfs {
+				err = idmapset.ShiftRootfs(args.StateDir, storage.ZFSIdmapSetSkipper)
+			} else if c.Storage().GetStorageType() == StorageTypeBtrfs {
+				err = ShiftBtrfsRootfs(args.StateDir, idmapset)
+			} else {
+				err = idmapset.ShiftRootfs(args.StateDir, nil)
+			}
+			if ourStart {
+				_, err2 := c.StorageStop()
+				if err2 != nil && err == nil {
+					err = err2
+				}
+			}
+
+			// Check that the rootfs shift (and storage stop) succeeded.
+			if err != nil {
+				return err
+			}
+		}
+
+		configPath := filepath.Join(c.LogPath(), "lxc.conf")
+
+		if args.DumpDir != "" {
+			finalStateDir = fmt.Sprintf("%s/%s", args.StateDir, args.DumpDir)
+		}
+
+		_, migrateErr = shared.RunCommand(
+			c.state.OS.ExecPath,
+			"forkmigrate",
+			c.name,
+			c.state.OS.LxcPath,
+			configPath,
+			finalStateDir,
+			fmt.Sprintf("%v", preservesInodes))
+
+		if migrateErr == nil {
+			// Run any post start hooks.
+			err := c.runHooks(postStartHooks)
+			if err != nil {
+				// Attempt to stop container.
+				c.Stop(false)
+				return err
+			}
+		}
+	} else if args.Cmd == lxc.MIGRATE_FEATURE_CHECK {
+		err := c.InitLXC(true)
+		if err != nil {
+			return err
+		}
+
+		opts := lxc.MigrateOptions{
+			FeaturesToCheck: args.Features,
+		}
+		migrateErr = c.c.Migrate(args.Cmd, opts)
+		if migrateErr != nil {
+			logger.Info("CRIU feature check failed", ctxMap)
+			return migrateErr
+		}
+		return nil
+	} else {
+		err := c.InitLXC(true)
+		if err != nil {
+			return err
+		}
+
+		script := ""
+		if args.ActionScript {
+			script = filepath.Join(args.StateDir, "action.sh")
+		}
+
+		if args.DumpDir != "" {
+			finalStateDir = fmt.Sprintf("%s/%s", args.StateDir, args.DumpDir)
+		}
+
+		// TODO: make this configurable? Ultimately I think we don't
+		// want to do that; what we really want to do is have "modes"
+		// of criu operation where one is "make this succeed" and the
+		// other is "make this fast". Anyway, for now, let's choose a
+		// really big size so it almost always succeeds, even if it is
+		// slow.
+		ghostLimit := uint64(256 * 1024 * 1024)
+
+		opts := lxc.MigrateOptions{
+			Stop:            args.Stop,
+			Directory:       finalStateDir,
+			Verbose:         true,
+			PreservesInodes: preservesInodes,
+			ActionScript:    script,
+			GhostLimit:      ghostLimit,
+		}
+		if args.PreDumpDir != "" {
+			opts.PredumpDir = fmt.Sprintf("../%s", args.PreDumpDir)
+		}
+
+		if !c.IsRunning() {
+			// otherwise the migration will needlessly fail
+			args.Stop = false
+		}
+
+		migrateErr = c.c.Migrate(args.Cmd, opts)
+	}
+
+	collectErr := collectCRIULogFile(c, finalStateDir, args.Function, prettyCmd)
+	if collectErr != nil {
+		logger.Error("Error collecting checkpoint log file", log.Ctx{"err": collectErr})
+	}
+
+	if migrateErr != nil {
+		log, err2 := getCRIULogErrors(finalStateDir, prettyCmd)
+		if err2 == nil {
+			logger.Info("Failed migrating container", ctxMap)
+			migrateErr = fmt.Errorf("%s %s failed\n%s", args.Function, prettyCmd, log)
+		}
+
+		return migrateErr
+	}
+
+	logger.Info("Migrated container", ctxMap)
+
+	return nil
+}
+
+func (c *ContainerLXC) TemplateApply(trigger string) error {
+	// "create" and "copy" are deferred until next start
+	if shared.StringInSlice(trigger, []string{"create", "copy"}) {
+		// The two events are mutually exclusive so only keep the last one
+		err := c.VolatileSet(map[string]string{"volatile.apply_template": trigger})
+		if err != nil {
+			return errors.Wrap(err, "Failed to set apply_template volatile key")
+		}
+
+		return nil
+	}
+
+	return c.templateApplyNow(trigger)
+}
+
+func (c *ContainerLXC) templateApplyNow(trigger string) error {
+	// If there's no metadata, just return
+	fname := filepath.Join(c.Path(), "metadata.yaml")
+	if !shared.PathExists(fname) {
+		return nil
+	}
+
+	// Parse the metadata
+	content, err := ioutil.ReadFile(fname)
+	if err != nil {
+		return errors.Wrap(err, "Failed to read metadata")
+	}
+
+	metadata := new(api.ImageMetadata)
+	err = yaml.Unmarshal(content, &metadata)
+	if err != nil {
+		return errors.Wrapf(err, "Could not parse %s", fname)
+	}
+
+	// Find rootUid and rootGid
+	idmapset, err := c.DiskIdmap()
+	if err != nil {
+		return errors.Wrap(err, "Failed to set ID map")
+	}
+
+	rootUid := int64(0)
+	rootGid := int64(0)
+
+	// Get the right uid and gid for the container
+	if idmapset != nil {
+		rootUid, rootGid = idmapset.ShiftIntoNs(0, 0)
+	}
+
+	// Figure out the container architecture
+	arch, err := osarch.ArchitectureName(c.architecture)
+	if err != nil {
+		arch, err = osarch.ArchitectureName(c.state.OS.Architectures[0])
+		if err != nil {
+			return errors.Wrap(err, "Failed to detect system architecture")
+		}
+	}
+
+	// Generate the container metadata
+	containerMeta := make(map[string]string)
+	containerMeta["name"] = c.name
+	containerMeta["architecture"] = arch
+
+	if c.ephemeral {
+		containerMeta["ephemeral"] = "true"
+	} else {
+		containerMeta["ephemeral"] = "false"
+	}
+
+	if c.IsPrivileged() {
+		containerMeta["privileged"] = "true"
+	} else {
+		containerMeta["privileged"] = "false"
+	}
+
+	// Go through the templates
+	for tplPath, tpl := range metadata.Templates {
+		var w *os.File
+
+		// Check if the template should be applied now
+		found := false
+		for _, tplTrigger := range tpl.When {
+			if tplTrigger == trigger {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			continue
+		}
+
+		// Open the file to template, create if needed
+		fullpath := filepath.Join(c.RootfsPath(), strings.TrimLeft(tplPath, "/"))
+		if shared.PathExists(fullpath) {
+			if tpl.CreateOnly {
+				continue
+			}
+
+			// Truncate and overwrite the existing file
+			w, err = os.Create(fullpath)
+			if err != nil {
+				return errors.Wrap(err, "Failed to create template file")
+			}
+		} else {
+			// Create the directories leading to the file
+			err = shared.MkdirAllOwner(path.Dir(fullpath), 0755, int(rootUid), int(rootGid))
+			if err != nil {
+				return errors.Wrap(err, "Failed to create template directory")
+			}
+
+			// Create the file itself
+			w, err = os.Create(fullpath)
+			if err != nil {
+				return err
+			}
+
+			// Fix ownership and mode
+			w.Chown(int(rootUid), int(rootGid))
+			w.Chmod(0644)
+		}
+		defer w.Close()
+
+		// Read the template
+		tplString, err := ioutil.ReadFile(filepath.Join(c.TemplatesPath(), tpl.Template))
+		if err != nil {
+			return errors.Wrap(err, "Failed to read template file")
+		}
+
+		// Restrict filesystem access to within the container's rootfs
+		tplSet := pongo2.NewSet(fmt.Sprintf("%s-%s", c.name, tpl.Template), template.ChrootLoader{Path: c.RootfsPath()})
+
+		tplRender, err := tplSet.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
+		if err != nil {
+			return errors.Wrap(err, "Failed to parse template")
+		}
+
+		configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
+			val, ok := c.expandedConfig[confKey.String()]
+			if !ok {
+				return confDefault
+			}
+
+			return pongo2.AsValue(strings.TrimRight(val, "\r\n"))
+		}
+
+		// Render the template
+		err = tplRender.ExecuteWriter(pongo2.Context{"trigger": trigger,
+			"path":       tplPath,
+			"container":  containerMeta,
+			"config":     c.expandedConfig,
+			"devices":    c.expandedDevices,
+			"properties": tpl.Properties,
+			"config_get": configGet}, w)
+		if err != nil {
+			return errors.Wrap(err, "Failed to render template")
+		}
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) FileExists(path string) error {
+	// Setup container storage if needed
+	var ourStart bool
+	var err error
+	if !c.IsRunning() {
+		ourStart, err = c.StorageStart()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Check if the file exists in the container
+	_, stderr, err := shared.RunCommandSplit(
+		nil,
+		c.state.OS.ExecPath,
+		"forkfile",
+		"exists",
+		c.RootfsPath(),
+		fmt.Sprintf("%d", c.InitPID()),
+		path,
+	)
+
+	// Tear down container storage if needed
+	if !c.IsRunning() && ourStart {
+		_, err := c.StorageStop()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Process forkcheckfile response
+	if stderr != "" {
+		if strings.HasPrefix(stderr, "error:") {
+			return fmt.Errorf(strings.TrimPrefix(strings.TrimSuffix(stderr, "\n"), "error: "))
+		}
+
+		for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
+			logger.Debugf("forkcheckfile: %s", line)
+		}
+	}
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error) {
+	var ourStart bool
+	var err error
+	// Setup container storage if needed
+	if !c.IsRunning() {
+		ourStart, err = c.StorageStart()
+		if err != nil {
+			return -1, -1, 0, "", nil, err
+		}
+	}
+
+	// Get the file from the container
+	_, stderr, err := shared.RunCommandSplit(
+		nil,
+		c.state.OS.ExecPath,
+		"forkfile",
+		"pull",
+		c.RootfsPath(),
+		fmt.Sprintf("%d", c.InitPID()),
+		srcpath,
+		dstpath,
+	)
+
+	// Tear down container storage if needed
+	if !c.IsRunning() && ourStart {
+		_, err := c.StorageStop()
+		if err != nil {
+			return -1, -1, 0, "", nil, err
+		}
+	}
+
+	uid := int64(-1)
+	gid := int64(-1)
+	mode := -1
+	type_ := "unknown"
+	var dirEnts []string
+	var errStr string
+
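+	// The forkfile helper reports results as "key: value" lines on
+	// stderr; a successful pull might produce (hypothetical output):
+	//
+	//   uid: 0
+	//   gid: 0
+	//   mode: 420
+	//   type: file
+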
+	// Process forkgetfile response
+	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
+		if line == "" {
+			continue
+		}
+
+		// Extract errors
+		if strings.HasPrefix(line, "error: ") {
+			errStr = strings.TrimPrefix(line, "error: ")
+			continue
+		}
+
+		if strings.HasPrefix(line, "errno: ") {
+			errno := strings.TrimPrefix(line, "errno: ")
+			if errno == "2" {
+				return -1, -1, 0, "", nil, os.ErrNotExist
+			}
+
+			return -1, -1, 0, "", nil, fmt.Errorf(errStr)
+		}
+
+		// Extract the uid
+		if strings.HasPrefix(line, "uid: ") {
+			uid, err = strconv.ParseInt(strings.TrimPrefix(line, "uid: "), 10, 64)
+			if err != nil {
+				return -1, -1, 0, "", nil, err
+			}
+
+			continue
+		}
+
+		// Extract the gid
+		if strings.HasPrefix(line, "gid: ") {
+			gid, err = strconv.ParseInt(strings.TrimPrefix(line, "gid: "), 10, 64)
+			if err != nil {
+				return -1, -1, 0, "", nil, err
+			}
+
+			continue
+		}
+
+		// Extract the mode
+		if strings.HasPrefix(line, "mode: ") {
+			mode, err = strconv.Atoi(strings.TrimPrefix(line, "mode: "))
+			if err != nil {
+				return -1, -1, 0, "", nil, err
+			}
+
+			continue
+		}
+
+		if strings.HasPrefix(line, "type: ") {
+			type_ = strings.TrimPrefix(line, "type: ")
+			continue
+		}
+
+		if strings.HasPrefix(line, "entry: ") {
+			ent := strings.TrimPrefix(line, "entry: ")
+			ent = strings.Replace(ent, "\x00", "\n", -1)
+			dirEnts = append(dirEnts, ent)
+			continue
+		}
+
+		logger.Debugf("forkgetfile: %s", line)
+	}
+
+	if err != nil {
+		return -1, -1, 0, "", nil, err
+	}
+
+	// Unmap uid and gid if needed
+	if !c.IsRunning() {
+		idmapset, err := c.DiskIdmap()
+		if err != nil {
+			return -1, -1, 0, "", nil, err
+		}
+
+		if idmapset != nil {
+			uid, gid = idmapset.ShiftFromNs(uid, gid)
+		}
+	}
+
+	return uid, gid, os.FileMode(mode), type_, dirEnts, nil
+}
+
+func (c *ContainerLXC) FilePush(type_ string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error {
+	var rootUid int64
+	var rootGid int64
+	var errStr string
+
+	// Map uid and gid if needed
+	if !c.IsRunning() {
+		idmapset, err := c.DiskIdmap()
+		if err != nil {
+			return err
+		}
+
+		if idmapset != nil {
+			uid, gid = idmapset.ShiftIntoNs(uid, gid)
+			rootUid, rootGid = idmapset.ShiftIntoNs(0, 0)
+		}
+	}
+
+	var ourStart bool
+	var err error
+	// Setup container storage if needed
+	if !c.IsRunning() {
+		ourStart, err = c.StorageStart()
+		if err != nil {
+			return err
+		}
+	}
+
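+	// When the client does not specify a mode, forkfile falls back to
+	// these conservative defaults (0640 for files, 0750 for directories).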
+	defaultMode := 0640
+	if type_ == "directory" {
+		defaultMode = 0750
+	}
+
+	// Push the file to the container
+	_, stderr, err := shared.RunCommandSplit(
+		nil,
+		c.state.OS.ExecPath,
+		"forkfile",
+		"push",
+		c.RootfsPath(),
+		fmt.Sprintf("%d", c.InitPID()),
+		srcpath,
+		dstpath,
+		type_,
+		fmt.Sprintf("%d", uid),
+		fmt.Sprintf("%d", gid),
+		fmt.Sprintf("%d", mode),
+		fmt.Sprintf("%d", rootUid),
+		fmt.Sprintf("%d", rootGid),
+		fmt.Sprintf("%d", int(os.FileMode(defaultMode)&os.ModePerm)),
+		write,
+	)
+
+	// Tear down container storage if needed
+	if !c.IsRunning() && ourStart {
+		_, err := c.StorageStop()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Process forkfile push response
+	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
+		if line == "" {
+			continue
+		}
+
+		// Extract errors
+		if strings.HasPrefix(line, "error: ") {
+			errStr = strings.TrimPrefix(line, "error: ")
+			continue
+		}
+
+		if strings.HasPrefix(line, "errno: ") {
+			errno := strings.TrimPrefix(line, "errno: ")
+			if errno == "2" {
+				return os.ErrNotExist
+			}
+
+			return fmt.Errorf(errStr)
+		}
+	}
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) FileRemove(path string) error {
+	var errStr string
+	var ourStart bool
+	var err error
+
+	// Setup container storage if needed
+	if !c.IsRunning() {
+		ourStart, err = c.StorageStart()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Remove the file from the container
+	_, stderr, err := shared.RunCommandSplit(
+		nil,
+		c.state.OS.ExecPath,
+		"forkfile",
+		"remove",
+		c.RootfsPath(),
+		fmt.Sprintf("%d", c.InitPID()),
+		path,
+	)
+
+	// Tear down container storage if needed
+	if !c.IsRunning() && ourStart {
+		_, err := c.StorageStop()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Process forkremovefile response
+	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
+		if line == "" {
+			continue
+		}
+
+		// Extract errors
+		if strings.HasPrefix(line, "error: ") {
+			errStr = strings.TrimPrefix(line, "error: ")
+			continue
+		}
+
+		if strings.HasPrefix(line, "errno: ") {
+			errno := strings.TrimPrefix(line, "errno: ")
+			if errno == "2" {
+				return os.ErrNotExist
+			}
+
+			return fmt.Errorf(errStr)
+		}
+	}
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) Console(terminal *os.File) *exec.Cmd {
+	args := []string{
+		c.state.OS.ExecPath,
+		"forkconsole",
+		project.Prefix(c.Project(), c.Name()),
+		c.state.OS.LxcPath,
+		filepath.Join(c.LogPath(), "lxc.conf"),
+		"tty=0",
+		"escape=-1"}
+
+	cmd := exec.Cmd{}
+	cmd.Path = c.state.OS.ExecPath
+	cmd.Args = args
+	cmd.Stdin = terminal
+	cmd.Stdout = terminal
+	cmd.Stderr = terminal
+	return &cmd
+}
+
+func (c *ContainerLXC) ConsoleLog(opts lxc.ConsoleLogOptions) (string, error) {
+	msg, err := c.c.ConsoleLog(opts)
+	if err != nil {
+		return "", err
+	}
+
+	return string(msg), nil
+}
+
+func (c *ContainerLXC) Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, wait bool, cwd string, uid uint32, gid uint32) (*exec.Cmd, int, int, error) {
+	// Prepare the environment
+	envSlice := []string{}
+
+	for k, v := range env {
+		envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v))
+	}
+
+	// Setup logfile
+	logPath := filepath.Join(c.LogPath(), "forkexec.log")
+	logFile, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0644)
+	if err != nil {
+		return nil, -1, -1, err
+	}
+
+	// Prepare the subcommand
+	cname := project.Prefix(c.Project(), c.Name())
+	args := []string{
+		c.state.OS.ExecPath,
+		"forkexec",
+		cname,
+		c.state.OS.LxcPath,
+		filepath.Join(c.LogPath(), "lxc.conf"),
+		cwd,
+		fmt.Sprintf("%d", uid),
+		fmt.Sprintf("%d", gid),
+	}
+
+	args = append(args, "--")
+	args = append(args, "env")
+	args = append(args, envSlice...)
+
+	args = append(args, "--")
+	args = append(args, "cmd")
+	args = append(args, command...)
+
+	cmd := exec.Cmd{}
+	cmd.Path = c.state.OS.ExecPath
+	cmd.Args = args
+
+	cmd.Stdin = nil
+	cmd.Stdout = logFile
+	cmd.Stderr = logFile
+
+	// Mitigation for CVE-2019-5736
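+	// Re-executing liblxc from an in-memory copy (memfd) stops a
+	// malicious container process from overwriting the host binary via
+	// /proc/self/exe during attach.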
+	useRexec := false
+	if c.expandedConfig["raw.idmap"] != "" {
+		err := allowedUnprivilegedOnlyMap(c.expandedConfig["raw.idmap"])
+		if err != nil {
+			useRexec = true
+		}
+	}
+
+	if shared.IsTrue(c.expandedConfig["security.privileged"]) {
+		useRexec = true
+	}
+
+	if useRexec {
+		cmd.Env = append(os.Environ(), "LXC_MEMFD_REXEC=1")
+	}
+
+	// Setup the communication pipe
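+	// forkexec writes the PID of the attached process back over this
+	// pipe as a JSON-encoded integer once the command has started; it is
+	// decoded below before deciding whether to wait.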
+	rStatus, wStatus, err := shared.Pipe()
+	if err != nil {
+		return nil, -1, -1, err
+	}
+	defer rStatus.Close()
+
+	cmd.ExtraFiles = []*os.File{stdin, stdout, stderr, wStatus}
+	err = cmd.Start()
+	if err != nil {
+		wStatus.Close()
+		return nil, -1, -1, err
+	}
+	wStatus.Close()
+
+	attachedPid := -1
+	if err := json.NewDecoder(rStatus).Decode(&attachedPid); err != nil {
+		logger.Errorf("Failed to retrieve PID of executing child process: %s", err)
+		return nil, -1, -1, err
+	}
+
+	// It's the callers responsibility to wait or not wait.
+	if !wait {
+		return &cmd, -1, attachedPid, nil
+	}
+
+	err = cmd.Wait()
+	if err != nil {
+		exitErr, ok := err.(*exec.ExitError)
+		if ok {
+			status, ok := exitErr.Sys().(syscall.WaitStatus)
+			if ok {
+				if status.Signaled() {
+					// 128 + n == Fatal error signal "n"
+					return nil, 128 + int(status.Signal()), attachedPid, nil
+				}
+
+				return nil, status.ExitStatus(), attachedPid, nil
+			}
+		}
+
+		return nil, -1, -1, err
+	}
+
+	return nil, 0, attachedPid, nil
+}
+
+func (c *ContainerLXC) cpuState() api.InstanceStateCPU {
+	cpu := api.InstanceStateCPU{}
+
+	if !c.state.OS.CGroupCPUacctController {
+		return cpu
+	}
+
+	// CPU usage in seconds
+	value, err := c.CGroupGet("cpuacct.usage")
+	if err != nil {
+		cpu.Usage = -1
+		return cpu
+	}
+
+	valueInt, err := strconv.ParseInt(value, 10, 64)
+	if err != nil {
+		cpu.Usage = -1
+		return cpu
+	}
+
+	cpu.Usage = valueInt
+
+	return cpu
+}
+
+func (c *ContainerLXC) diskState() map[string]api.InstanceStateDisk {
+	disk := map[string]api.InstanceStateDisk{}
+
+	// Initialize storage interface for the container.
+	err := c.InitStorage()
+	if err != nil {
+		return disk
+	}
+
+	for _, dev := range c.expandedDevices.Sorted() {
+		if dev.Config["type"] != "disk" {
+			continue
+		}
+
+		if dev.Config["path"] != "/" {
+			continue
+		}
+
+		usage, err := c.storage.ContainerGetUsage(c)
+		if err != nil {
+			continue
+		}
+
+		disk[dev.Name] = api.InstanceStateDisk{Usage: usage}
+	}
+
+	return disk
+}
+
+func (c *ContainerLXC) memoryState() api.InstanceStateMemory {
+	memory := api.InstanceStateMemory{}
+
+	if !c.state.OS.CGroupMemoryController {
+		return memory
+	}
+
+	// Memory in bytes
+	value, err := c.CGroupGet("memory.usage_in_bytes")
+	valueInt, err1 := strconv.ParseInt(value, 10, 64)
+	if err == nil && err1 == nil {
+		memory.Usage = valueInt
+	}
+
+	// Memory peak in bytes
+	value, err = c.CGroupGet("memory.max_usage_in_bytes")
+	valueInt, err1 = strconv.ParseInt(value, 10, 64)
+	if err == nil && err1 == nil {
+		memory.UsagePeak = valueInt
+	}
+
+	if c.state.OS.CGroupSwapAccounting {
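+		// The memsw counters account for memory+swap combined, so the
+		// plain memory figures are subtracted below to isolate swap.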
+		// Swap in bytes
+		if memory.Usage > 0 {
+			value, err := c.CGroupGet("memory.memsw.usage_in_bytes")
+			valueInt, err1 := strconv.ParseInt(value, 10, 64)
+			if err == nil && err1 == nil {
+				memory.SwapUsage = valueInt - memory.Usage
+			}
+		}
+
+		// Swap peak in bytes
+		if memory.UsagePeak > 0 {
+			value, err = c.CGroupGet("memory.memsw.max_usage_in_bytes")
+			valueInt, err1 = strconv.ParseInt(value, 10, 64)
+			if err == nil && err1 == nil {
+				memory.SwapUsagePeak = valueInt - memory.UsagePeak
+			}
+		}
+	}
+
+	return memory
+}
+
+func (c *ContainerLXC) networkState() map[string]api.InstanceStateNetwork {
+	result := map[string]api.InstanceStateNetwork{}
+
+	pid := c.InitPID()
+	if pid < 1 {
+		return result
+	}
+
+	couldUseNetnsGetifaddrs := c.state.OS.NetnsGetifaddrs
+	if couldUseNetnsGetifaddrs {
+		nw, err := netutils.NetnsGetifaddrs(int32(pid))
+		if err != nil {
+			couldUseNetnsGetifaddrs = false
+			logger.Error("Failed to retrieve network information via netlink", log.Ctx{"container": c.name, "err": err, "pid": pid})
+		} else {
+			result = nw
+		}
+	}
+
+	if !couldUseNetnsGetifaddrs {
+		// Get the network state from the container
+		out, err := shared.RunCommand(
+			c.state.OS.ExecPath,
+			"forknet",
+			"info",
+			fmt.Sprintf("%d", pid))
+
+		// Process the forknet response
+		if err != nil {
+			logger.Error("Error calling 'lxd forknet info'", log.Ctx{"container": c.name, "err": err, "pid": pid})
+			return result
+		}
+
+		// netns_getifaddrs() is available but failed, while the setns()
+		// + forknet fallback succeeded, so always use the fallback style
+		// of retrieval from now on.
+		c.state.OS.NetnsGetifaddrs = false
+
+		nw := map[string]api.InstanceStateNetwork{}
+		err = json.Unmarshal([]byte(out), &nw)
+		if err != nil {
+			logger.Error("Failed to parse forknet output", log.Ctx{"container": c.name, "err": err})
+			return result
+		}
+		result = nw
+	}
+
+	// Get host_name from volatile data if not set already.
+	for name, dev := range result {
+		if dev.HostName == "" {
+			dev.HostName = c.localConfig[fmt.Sprintf("volatile.%s.host_name", name)]
+			result[name] = dev
+		}
+	}
+
+	return result
+}
+
+func (c *ContainerLXC) processesState() int64 {
+	// Return 0 if not running
+	pid := c.InitPID()
+	if pid == -1 {
+		return 0
+	}
+
+	if c.state.OS.CGroupPidsController {
+		value, err := c.CGroupGet("pids.current")
+		if err != nil {
+			return -1
+		}
+
+		valueInt, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return -1
+		}
+
+		return valueInt
+	}
+
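+	// Without the pids cgroup controller, fall back to walking the
+	// process tree breadth-first via /proc/<pid>/task/<pid>/children.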
+	pids := []int64{int64(pid)}
+
+	// Go through the pid list, adding new pids at the end so we go through them all
+	for i := 0; i < len(pids); i++ {
+		fname := fmt.Sprintf("/proc/%d/task/%d/children", pids[i], pids[i])
+		fcont, err := ioutil.ReadFile(fname)
+		if err != nil {
+			// The process may have terminated while this loop was running
+			continue
+		}
+
+		content := strings.Split(string(fcont), " ")
+		for j := 0; j < len(content); j++ {
+			pid, err := strconv.ParseInt(content[j], 10, 64)
+			if err == nil {
+				pids = append(pids, pid)
+			}
+		}
+	}
+
+	return int64(len(pids))
+}
+
+// Storage functions
+func (c *ContainerLXC) Storage() Storage {
+	if c.storage == nil {
+		c.InitStorage()
+	}
+
+	return c.storage
+}
+
+func (c *ContainerLXC) StorageStart() (bool, error) {
+	// Initialize storage interface for the container.
+	err := c.InitStorage()
+	if err != nil {
+		return false, err
+	}
+
+	isOurOperation, err := c.StorageStartSensitive()
+	// Remove this as soon as zfs is fixed
+	if c.storage.GetStorageType() == StorageTypeZfs && err == unix.EBUSY {
+		return isOurOperation, nil
+	}
+
+	return isOurOperation, err
+}
+
+// Kill this function as soon as zfs is fixed.
+func (c *ContainerLXC) StorageStartSensitive() (bool, error) {
+	// Initialize storage interface for the container.
+	err := c.InitStorage()
+	if err != nil {
+		return false, err
+	}
+
+	var isOurOperation bool
+	if c.IsSnapshot() {
+		isOurOperation, err = c.storage.ContainerSnapshotStart(c)
+	} else {
+		isOurOperation, err = c.storage.ContainerMount(c)
+	}
+
+	return isOurOperation, err
+}
+
+func (c *ContainerLXC) StorageStop() (bool, error) {
+	// Initialize storage interface for the container.
+	err := c.InitStorage()
+	if err != nil {
+		return false, err
+	}
+
+	var isOurOperation bool
+	if c.IsSnapshot() {
+		isOurOperation, err = c.storage.ContainerSnapshotStop(c)
+	} else {
+		isOurOperation, err = c.storage.ContainerUmount(c, c.Path())
+	}
+
+	return isOurOperation, err
+}
+
+// Mount handling
+func (c *ContainerLXC) insertMountLXD(source, target, fstype string, flags int, mntnsPID int, shiftfs bool) error {
+	pid := mntnsPID
+	if pid <= 0 {
+		// Get the init PID
+		pid = c.InitPID()
+		if pid == -1 {
+			// Container isn't running
+			return fmt.Errorf("Can't insert mount into stopped container")
+		}
+	}
+
+	// Create the temporary mount target
+	var tmpMount string
+	var err error
+	if shared.IsDir(source) {
+		tmpMount, err = ioutil.TempDir(c.ShmountsPath(), "lxdmount_")
+		if err != nil {
+			return fmt.Errorf("Failed to create shmounts path: %s", err)
+		}
+	} else {
+		f, err := ioutil.TempFile(c.ShmountsPath(), "lxdmount_")
+		if err != nil {
+			return fmt.Errorf("Failed to create shmounts path: %s", err)
+		}
+
+		tmpMount = f.Name()
+		f.Close()
+	}
+	defer os.Remove(tmpMount)
+
+	// Mount the filesystem
+	err = unix.Mount(source, tmpMount, fstype, uintptr(flags), "")
+	if err != nil {
+		return fmt.Errorf("Failed to setup temporary mount: %s", err)
+	}
+	defer unix.Unmount(tmpMount, unix.MNT_DETACH)
+
+	// Setup host side shiftfs as needed
+	if shiftfs {
+		err = unix.Mount(tmpMount, tmpMount, "shiftfs", 0, "mark,passthrough=3")
+		if err != nil {
+			return fmt.Errorf("Failed to setup host side shiftfs mount: %s", err)
+		}
+		defer unix.Unmount(tmpMount, unix.MNT_DETACH)
+	}
+
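+	// The container's shmounts directory is shared with the host and is
+	// visible inside the container at /dev/.lxd-mounts, so a mount staged
+	// in the temporary path above can be picked up by forkmount from
+	// within the container's namespaces.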
+	// Move the mount inside the container
+	mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount))
+	pidStr := fmt.Sprintf("%d", pid)
+
+	_, err = shared.RunCommand(c.state.OS.ExecPath, "forkmount", "lxd-mount", pidStr, mntsrc, target, fmt.Sprintf("%v", shiftfs))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) insertMountLXC(source, target, fstype string, flags int) error {
+	cname := project.Prefix(c.Project(), c.Name())
+	configPath := filepath.Join(c.LogPath(), "lxc.conf")
+	if fstype == "" {
+		fstype = "none"
+	}
+
+	if !strings.HasPrefix(target, "/") {
+		target = "/" + target
+	}
+
+	_, err := shared.RunCommand(c.state.OS.ExecPath, "forkmount", "lxc-mount", cname, c.state.OS.LxcPath, configPath, source, target, fstype, fmt.Sprintf("%d", flags))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) insertMount(source, target, fstype string, flags int, shiftfs bool) error {
+	if c.state.OS.LXCFeatures["mount_injection_file"] && !shiftfs {
+		return c.insertMountLXC(source, target, fstype, flags)
+	}
+
+	return c.insertMountLXD(source, target, fstype, flags, -1, shiftfs)
+}
+
+func (c *ContainerLXC) removeMount(mount string) error {
+	// Get the init PID
+	pid := c.InitPID()
+	if pid == -1 {
+		// Container isn't running
+		return fmt.Errorf("Can't remove mount from stopped container")
+	}
+
+	if c.state.OS.LXCFeatures["mount_injection_file"] {
+		configPath := filepath.Join(c.LogPath(), "lxc.conf")
+		cname := project.Prefix(c.Project(), c.Name())
+
+		if !strings.HasPrefix(mount, "/") {
+			mount = "/" + mount
+		}
+
+		_, err := shared.RunCommand(c.state.OS.ExecPath, "forkmount", "lxc-umount", cname, c.state.OS.LxcPath, configPath, mount)
+		if err != nil {
+			return err
+		}
+	} else {
+		// Remove the mount from the container
+		pidStr := fmt.Sprintf("%d", pid)
+		_, err := shared.RunCommand(c.state.OS.ExecPath, "forkmount", "lxd-umount", pidStr, mount)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *ContainerLXC) InsertSeccompUnixDevice(prefix string, m config.Device, pid int) error {
+	if pid < 0 {
+		return fmt.Errorf("Invalid request PID specified")
+	}
+
+	rootLink := fmt.Sprintf("/proc/%d/root", pid)
+	rootPath, err := os.Readlink(rootLink)
+	if err != nil {
+		return err
+	}
+
+	err, uid, gid, _, _ := TaskIDs(pid)
+	if err != nil {
+		return err
+	}
+
+	idmapset, err := c.CurrentIdmap()
+	if err != nil {
+		return err
+	}
+
+	nsuid, nsgid := idmapset.ShiftFromNs(uid, gid)
+	m["uid"] = fmt.Sprintf("%d", nsuid)
+	m["gid"] = fmt.Sprintf("%d", nsgid)
+
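+	// Resolve the requested device path from the target process's point
+	// of view: relative paths are anchored at its current working
+	// directory, absolute ones at its root, both via /proc/<pid>/.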
+	if !path.IsAbs(m["path"]) {
+		cwdLink := fmt.Sprintf("/proc/%d/cwd", pid)
+		prefixPath, err := os.Readlink(cwdLink)
+		if err != nil {
+			return err
+		}
+
+		prefixPath = strings.TrimPrefix(prefixPath, rootPath)
+		m["path"] = filepath.Join(rootPath, prefixPath, m["path"])
+	} else {
+		m["path"] = filepath.Join(rootPath, m["path"])
+	}
+
+	// Reuse the idmap fetched above rather than querying it a second time.
+	d, err := device.UnixDeviceCreate(c.state, idmapset, c.DevicesPath(), prefix, m, true)
+	if err != nil {
+		return fmt.Errorf("Failed to setup device: %s", err)
+	}
+	devPath := d.HostPath
+	tgtPath := d.RelativePath
+
+	// Bind-mount it into the container
+	defer os.Remove(devPath)
+	return c.insertMountLXD(devPath, tgtPath, "none", unix.MS_BIND, pid, false)
+}
+
+func (c *ContainerLXC) removeUnixDevices() error {
+	// Check that we indeed have devices to remove
+	if !shared.PathExists(c.DevicesPath()) {
+		return nil
+	}
+
+	// Load the directory listing
+	dents, err := ioutil.ReadDir(c.DevicesPath())
+	if err != nil {
+		return err
+	}
+
+	// Go through all the unix devices
+	for _, f := range dents {
+		// Skip non-Unix devices
+		if !strings.HasPrefix(f.Name(), "forkmknod.unix.") && !strings.HasPrefix(f.Name(), "unix.") && !strings.HasPrefix(f.Name(), "infiniband.unix.") {
+			continue
+		}
+
+		// Remove the entry
+		devicePath := filepath.Join(c.DevicesPath(), f.Name())
+		err := os.Remove(devicePath)
+		if err != nil {
+			logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
+		}
+	}
+
+	return nil
+}
+
+// FillNetworkDevice takes a nic or infiniband device type and enriches it with automatically
+// generated name and hwaddr properties if these are missing from the device.
+func (c *ContainerLXC) FillNetworkDevice(name string, m config.Device) (config.Device, error) {
+	newDevice := m.Clone()
+
+	// Function to try and guess an available name
+	nextInterfaceName := func() (string, error) {
+		devNames := []string{}
+
+		// Include all static interface names
+		for _, dev := range c.expandedDevices.Sorted() {
+			if dev.Config["name"] != "" && !shared.StringInSlice(dev.Config["name"], devNames) {
+				devNames = append(devNames, dev.Config["name"])
+			}
+		}
+
+		// Include all currently allocated interface names
+		for k, v := range c.expandedConfig {
+			if !strings.HasPrefix(k, "volatile.") {
+				continue
+			}
+
+			fields := strings.SplitN(k, ".", 3)
+			if len(fields) != 3 {
+				continue
+			}
+
+			if fields[2] != "name" || shared.StringInSlice(v, devNames) {
+				continue
+			}
+
+			devNames = append(devNames, v)
+		}
+
+		// Attempt to include all existing interfaces
+		cname := project.Prefix(c.Project(), c.Name())
+		cc, err := lxc.NewContainer(cname, c.state.OS.LxcPath)
+		if err == nil {
+			defer cc.Release()
+
+			interfaces, err := cc.Interfaces()
+			if err == nil {
+				for _, name := range interfaces {
+					if shared.StringInSlice(name, devNames) {
+						continue
+					}
+
+					devNames = append(devNames, name)
+				}
+			}
+		}
+
+		i := 0
+		name := ""
+		for {
+			if m["type"] == "infiniband" {
+				name = fmt.Sprintf("ib%d", i)
+			} else {
+				name = fmt.Sprintf("eth%d", i)
+			}
+
+			// Find a free device name
+			if !shared.StringInSlice(name, devNames) {
+				return name, nil
+			}
+
+			i++
+		}
+	}
+
+	updateKey := func(key string, value string) error {
+		tx, err := c.state.Cluster.Begin()
+		if err != nil {
+			return err
+		}
+
+		err = db.ContainerConfigInsert(tx, c.id, map[string]string{key: value})
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+
+		err = db.TxCommit(tx)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// Fill in the MAC address
+	if !shared.StringInSlice(m["nictype"], []string{"physical", "ipvlan", "sriov"}) && m["hwaddr"] == "" {
+		configKey := fmt.Sprintf("volatile.%s.hwaddr", name)
+		volatileHwaddr := c.localConfig[configKey]
+		if volatileHwaddr == "" {
+			// Generate a new MAC address. Assign to the outer
+			// volatileHwaddr rather than shadowing it with ":=",
+			// otherwise the generated value would never reach
+			// newDevice below.
+			var err error
+			volatileHwaddr, err = device.NetworkNextInterfaceHWAddr()
+			if err != nil {
+				return nil, err
+			}
+
+			// Update the database
+			err = query.Retry(func() error {
+				err := updateKey(configKey, volatileHwaddr)
+				if err != nil {
+					// Check if something else filled it in behind our back
+					value, err1 := c.state.Cluster.ContainerConfigGet(c.id, configKey)
+					if err1 != nil || value == "" {
+						return err
+					}
+
+					c.localConfig[configKey] = value
+					c.expandedConfig[configKey] = value
+					return nil
+				}
+
+				c.localConfig[configKey] = volatileHwaddr
+				c.expandedConfig[configKey] = volatileHwaddr
+				return nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+		newDevice["hwaddr"] = volatileHwaddr
+	}
+
+	// Fill in the name
+	if m["name"] == "" {
+		configKey := fmt.Sprintf("volatile.%s.name", name)
+		volatileName := c.localConfig[configKey]
+		if volatileName == "" {
+			// Generate a new interface name. Assign to the outer
+			// volatileName rather than shadowing it with ":=" so
+			// the generated name reaches newDevice below.
+			var err error
+			volatileName, err = nextInterfaceName()
+			if err != nil {
+				return nil, err
+			}
+
+			// Update the database
+			err = updateKey(configKey, volatileName)
+			if err != nil {
+				// Check if something else filled it in behind our back
+				value, err1 := c.state.Cluster.ContainerConfigGet(c.id, configKey)
+				if err1 != nil || value == "" {
+					return nil, err
+				}
+
+				c.localConfig[configKey] = value
+				c.expandedConfig[configKey] = value
+			} else {
+				c.localConfig[configKey] = volatileName
+				c.expandedConfig[configKey] = volatileName
+			}
+		}
+		newDevice["name"] = volatileName
+	}
+
+	return newDevice, nil
+}
+
+func (c *ContainerLXC) removeDiskDevices() error {
+	// Check that we indeed have devices to remove
+	if !shared.PathExists(c.DevicesPath()) {
+		return nil
+	}
+
+	// Load the directory listing
+	dents, err := ioutil.ReadDir(c.DevicesPath())
+	if err != nil {
+		return err
+	}
+
+	// Go through all the disk devices
+	for _, f := range dents {
+		// Skip non-disk devices
+		if !strings.HasPrefix(f.Name(), "disk.") {
+			continue
+		}
+
+		// Always try to unmount the host side
+		_ = unix.Unmount(filepath.Join(c.DevicesPath(), f.Name()), unix.MNT_DETACH)
+
+		// Remove the entry
+		diskPath := filepath.Join(c.DevicesPath(), f.Name())
+		err := os.Remove(diskPath)
+		if err != nil {
+			logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
+		}
+	}
+
+	return nil
+}
+
+// Network I/O limits
+func (c *ContainerLXC) setNetworkPriority() error {
+	// Check that the container is running
+	if !c.IsRunning() {
+		return fmt.Errorf("Can't set network priority on stopped container")
+	}
+
+	// Don't bother if the cgroup controller doesn't exist
+	if !c.state.OS.CGroupNetPrioController {
+		return nil
+	}
+
+	// Extract the current priority
+	networkPriority := c.expandedConfig["limits.network.priority"]
+	if networkPriority == "" {
+		networkPriority = "0"
+	}
+
+	networkInt, err := strconv.Atoi(networkPriority)
+	if err != nil {
+		return err
+	}
+
+	// Get all the interfaces
+	netifs, err := net.Interfaces()
+	if err != nil {
+		return err
+	}
+
+	// Check that we at least managed to set one entry
+	success := false
+	var lastError error
+	for _, netif := range netifs {
+		err = c.CGroupSet("net_prio.ifpriomap", fmt.Sprintf("%s %d", netif.Name, networkInt))
+		if err == nil {
+			success = true
+		} else {
+			lastError = err
+		}
+	}
+
+	if !success {
+		return fmt.Errorf("Failed to set network device priority: %s", lastError)
+	}
+
+	return nil
+}
+
+// Various state query functions
+func (c *ContainerLXC) IsStateful() bool {
+	return c.stateful
+}
+
+func (c *ContainerLXC) IsEphemeral() bool {
+	return c.ephemeral
+}
+
+func (c *ContainerLXC) IsFrozen() bool {
+	return c.State() == "FROZEN"
+}
+
+func (c *ContainerLXC) IsNesting() bool {
+	return shared.IsTrue(c.expandedConfig["security.nesting"])
+}
+
+func (c *ContainerLXC) isCurrentlyPrivileged() bool {
+	if !c.IsRunning() {
+		return c.IsPrivileged()
+	}
+
+	idmap, err := c.CurrentIdmap()
+	if err != nil {
+		return c.IsPrivileged()
+	}
+
+	return idmap == nil
+}
+
+func (c *ContainerLXC) IsPrivileged() bool {
+	return shared.IsTrue(c.expandedConfig["security.privileged"])
+}
+
+func (c *ContainerLXC) IsRunning() bool {
+	state := c.State()
+	return state != "BROKEN" && state != "STOPPED"
+}
+
+func (c *ContainerLXC) IsSnapshot() bool {
+	return c.snapshot
+}
+
+// Various property query functions
+func (c *ContainerLXC) Architecture() int {
+	return c.architecture
+}
+
+func (c *ContainerLXC) CreationDate() time.Time {
+	return c.creationDate
+}
+
+func (c *ContainerLXC) LastUsedDate() time.Time {
+	return c.lastUsedDate
+}
+
+func (c *ContainerLXC) ExpandedConfig() map[string]string {
+	return c.expandedConfig
+}
+
+func (c *ContainerLXC) ExpandedDevices() config.Devices {
+	return c.expandedDevices
+}
+
+func (c *ContainerLXC) Id() int {
+	return c.id
+}
+
+func (c *ContainerLXC) InitPID() int {
+	// Load the go-lxc struct
+	err := c.InitLXC(false)
+	if err != nil {
+		return -1
+	}
+
+	return c.c.InitPid()
+}
+
+func (c *ContainerLXC) LocalConfig() map[string]string {
+	return c.localConfig
+}
+
+func (c *ContainerLXC) LocalDevices() config.Devices {
+	return c.localDevices
+}
+
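+// The volatile idmap keys track the mapping lifecycle: "volatile.idmap.current"
+// is the map the running container uses, "volatile.last_state.idmap" is the
+// map the on-disk filesystem was last shifted to, and "volatile.idmap.next"
+// is the map to apply on next start.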
+func (c *ContainerLXC) CurrentIdmap() (*idmap.IdmapSet, error) {
+	jsonIdmap, ok := c.LocalConfig()["volatile.idmap.current"]
+	if !ok {
+		return c.DiskIdmap()
+	}
+
+	return IDMapsetFromString(jsonIdmap)
+}
+
+func (c *ContainerLXC) DiskIdmap() (*idmap.IdmapSet, error) {
+	jsonIdmap, ok := c.LocalConfig()["volatile.last_state.idmap"]
+	if !ok {
+		return nil, nil
+	}
+
+	return IDMapsetFromString(jsonIdmap)
+}
+
+func (c *ContainerLXC) NextIdmap() (*idmap.IdmapSet, error) {
+	jsonIdmap, ok := c.LocalConfig()["volatile.idmap.next"]
+	if !ok {
+		return c.CurrentIdmap()
+	}
+
+	return IDMapsetFromString(jsonIdmap)
+}
+
+func (c *ContainerLXC) DaemonState() *state.State {
+	// FIXME: This function should go away, since the abstract container
+	//        interface should not be coupled with internal state details.
+	//        However this is not currently possible, because many
+	//        higher-level APIs use container variables as "implicit
+	//        handles" to database/OS state and then need a way to get a
+	//        reference to it.
+	return c.state
+}
+
+func (c *ContainerLXC) Location() string {
+	return c.node
+}
+
+func (c *ContainerLXC) Project() string {
+	return c.project
+}
+
+func (c *ContainerLXC) Name() string {
+	return c.name
+}
+
+func (c *ContainerLXC) Description() string {
+	return c.description
+}
+
+func (c *ContainerLXC) Profiles() []string {
+	return c.profiles
+}
+
+func (c *ContainerLXC) State() string {
+	state, err := c.getLxcState()
+	if err != nil {
+		return api.Error.String()
+	}
+	return state.String()
+}
+
+// Various container paths
+func (c *ContainerLXC) Path() string {
+	name := project.Prefix(c.Project(), c.Name())
+	return driver.ContainerPath(name, c.IsSnapshot())
+}
+
+func (c *ContainerLXC) DevicesPath() string {
+	name := project.Prefix(c.Project(), c.Name())
+	return shared.VarPath("devices", name)
+}
+
+func (c *ContainerLXC) ShmountsPath() string {
+	name := project.Prefix(c.Project(), c.Name())
+	return shared.VarPath("shmounts", name)
+}
+
+func (c *ContainerLXC) LogPath() string {
+	name := project.Prefix(c.Project(), c.Name())
+	return shared.LogPath(name)
+}
+
+func (c *ContainerLXC) LogFilePath() string {
+	return filepath.Join(c.LogPath(), "lxc.log")
+}
+
+func (c *ContainerLXC) ConsoleBufferLogPath() string {
+	return filepath.Join(c.LogPath(), "console.log")
+}
+
+func (c *ContainerLXC) RootfsPath() string {
+	return filepath.Join(c.Path(), "rootfs")
+}
+
+func (c *ContainerLXC) TemplatesPath() string {
+	return filepath.Join(c.Path(), "templates")
+}
+
+func (c *ContainerLXC) StatePath() string {
+	/* FIXME: backwards compatibility: we used to use Join(RootfsPath(),
+	 * "state"), which was bad. Let's just check to see if that directory
+	 * exists.
+	 */
+	oldStatePath := filepath.Join(c.RootfsPath(), "state")
+	if shared.IsDir(oldStatePath) {
+		return oldStatePath
+	}
+	return filepath.Join(c.Path(), "state")
+}
+
+func (c *ContainerLXC) StoragePool() (string, error) {
+	poolName, err := c.state.Cluster.ContainerPool(c.Project(), c.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return poolName, nil
+}
+
+// Progress tracking
+func (c *ContainerLXC) SetOperation(op *operation.Operation) {
+	c.op = op
+}
+
+func (c *ContainerLXC) ExpiryDate() time.Time {
+	if c.IsSnapshot() {
+		return c.expiryDate
+	}
+
+	// Return zero time if the container is not a snapshot
+	return time.Time{}
+}
+
+func (c *ContainerLXC) updateProgress(progress string) {
+	if c.op == nil {
+		return
+	}
+
+	meta := c.op.Metadata
+	if meta == nil {
+		meta = make(map[string]interface{})
+	}
+
+	if meta["container_progress"] != progress {
+		meta["container_progress"] = progress
+		c.op.UpdateMetadata(meta)
+	}
+}
+
+// Internal MAAS handling
+func (c *ContainerLXC) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
+	interfaces := []maas.ContainerInterface{}
+	for k, m := range devices {
+		if m["type"] != "nic" {
+			continue
+		}
+
+		if m["maas.subnet.ipv4"] == "" && m["maas.subnet.ipv6"] == "" {
+			continue
+		}
+
+		m, err := c.FillNetworkDevice(k, m)
+		if err != nil {
+			return nil, err
+		}
+
+		subnets := []maas.ContainerInterfaceSubnet{}
+
+		// IPv4
+		if m["maas.subnet.ipv4"] != "" {
+			subnet := maas.ContainerInterfaceSubnet{
+				Name:    m["maas.subnet.ipv4"],
+				Address: m["ipv4.address"],
+			}
+
+			subnets = append(subnets, subnet)
+		}
+
+		// IPv6
+		if m["maas.subnet.ipv6"] != "" {
+			subnet := maas.ContainerInterfaceSubnet{
+				Name:    m["maas.subnet.ipv6"],
+				Address: m["ipv6.address"],
+			}
+
+			subnets = append(subnets, subnet)
+		}
+
+		iface := maas.ContainerInterface{
+			Name:       m["name"],
+			MACAddress: m["hwaddr"],
+			Subnets:    subnets,
+		}
+
+		interfaces = append(interfaces, iface)
+	}
+
+	return interfaces, nil
+}
+
+func (c *ContainerLXC) maasUpdate(oldDevices map[string]map[string]string) error {
+	// Check if MAAS is configured
+	maasURL, err := cluster.ConfigGetString(c.state.Cluster, "maas.api.url")
+	if err != nil {
+		return err
+	}
+
+	if maasURL == "" {
+		return nil
+	}
+
+	// Check if there's something that uses MAAS
+	interfaces, err := c.maasInterfaces(c.expandedDevices.CloneNative())
+	if err != nil {
+		return err
+	}
+
+	var oldInterfaces []maas.ContainerInterface
+	if oldDevices != nil {
+		oldInterfaces, err = c.maasInterfaces(oldDevices)
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(interfaces) == 0 && len(oldInterfaces) == 0 {
+		return nil
+	}
+
+	// See if we're connected to MAAS
+	if c.state.MAAS == nil {
+		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
+	}
+
+	exists, err := c.state.MAAS.DefinedContainer(project.Prefix(c.project, c.name))
+	if err != nil {
+		return err
+	}
+
+	if exists {
+		if len(interfaces) == 0 && len(oldInterfaces) > 0 {
+			return c.state.MAAS.DeleteContainer(project.Prefix(c.project, c.name))
+		}
+
+		return c.state.MAAS.UpdateContainer(project.Prefix(c.project, c.name), interfaces)
+	}
+
+	return c.state.MAAS.CreateContainer(project.Prefix(c.project, c.name), interfaces)
+}
+
+func (c *ContainerLXC) maasRename(newName string) error {
+	maasURL, err := cluster.ConfigGetString(c.state.Cluster, "maas.api.url")
+	if err != nil {
+		return err
+	}
+
+	if maasURL == "" {
+		return nil
+	}
+
+	interfaces, err := c.maasInterfaces(c.expandedDevices.CloneNative())
+	if err != nil {
+		return err
+	}
+
+	if len(interfaces) == 0 {
+		return nil
+	}
+
+	if c.state.MAAS == nil {
+		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
+	}
+
+	exists, err := c.state.MAAS.DefinedContainer(project.Prefix(c.project, c.name))
+	if err != nil {
+		return err
+	}
+
+	if !exists {
+		return c.maasUpdate(nil)
+	}
+
+	return c.state.MAAS.RenameContainer(project.Prefix(c.project, c.name), project.Prefix(c.project, newName))
+}
+
+func (c *ContainerLXC) maasDelete() error {
+	maasURL, err := cluster.ConfigGetString(c.state.Cluster, "maas.api.url")
+	if err != nil {
+		return err
+	}
+
+	if maasURL == "" {
+		return nil
+	}
+
+	interfaces, err := c.maasInterfaces(c.expandedDevices.CloneNative())
+	if err != nil {
+		return err
+	}
+
+	if len(interfaces) == 0 {
+		return nil
+	}
+
+	if c.state.MAAS == nil {
+		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
+	}
+
+	exists, err := c.state.MAAS.DefinedContainer(project.Prefix(c.project, c.name))
+	if err != nil {
+		return err
+	}
+
+	if !exists {
+		return nil
+	}
+
+	return c.state.MAAS.DeleteContainer(project.Prefix(c.project, c.name))
+}
+
+// SaveLXCConfigFile exposes the underlying liblxc's SaveConfigFile function for use in patching.
+func (c *ContainerLXC) SaveLXCConfigFile(path string) error {
+	return c.c.SaveConfigFile(path)
+}
+
+// SetName modifies the internal name property of the struct.
+func (c *ContainerLXC) SetName(name string) {
+	c.name = name
+}

From 617331ffe0941ab69e8cc22f0d2a35afd9b44bf8 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:10:03 +0100
Subject: [PATCH 14/72] lxd/instance/interface: Moved to instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance_interface.go | 118 --------------------------------------
 1 file changed, 118 deletions(-)
 delete mode 100644 lxd/instance_interface.go

diff --git a/lxd/instance_interface.go b/lxd/instance_interface.go
deleted file mode 100644
index dcdc4c2198..0000000000
--- a/lxd/instance_interface.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package main
-
-import (
-	"io"
-	"os"
-	"os/exec"
-	"time"
-
-	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/device"
-	"github.com/lxc/lxd/lxd/device/config"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/state"
-	"github.com/lxc/lxd/shared/api"
-)
-
-// The Instance interface
-type Instance interface {
-	// Instance actions
-	Freeze() error
-	Shutdown(timeout time.Duration) error
-	Start(stateful bool) error
-	Stop(stateful bool) error
-	Unfreeze() error
-
-	IsPrivileged() bool
-
-	// Snapshots & migration & backups
-	Restore(source Instance, stateful bool) error
-	Snapshots() ([]Instance, error)
-	Backups() ([]backup, error)
-
-	// Config handling
-	Rename(newName string) error
-
-	// TODO rename db.ContainerArgs to db.InstanceArgs.
-	Update(newConfig db.ContainerArgs, userRequested bool) error
-
-	Delete() error
-	Export(w io.Writer, properties map[string]string) error
-
-	// Live configuration
-	CGroupGet(key string) (string, error)
-	CGroupSet(key string, value string) error
-	VolatileSet(changes map[string]string) error
-
-	// File handling
-	FileExists(path string) error
-	FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error)
-	FilePush(type_ string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error
-	FileRemove(path string) error
-
-	// Console - Allocate and run a console tty.
-	//
-	// terminal  - Bidirectional file descriptor.
-	//
-	// This function will not return until the console has been exited by
-	// the user.
-	Console(terminal *os.File) *exec.Cmd
-	Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, wait bool, cwd string, uid uint32, gid uint32) (*exec.Cmd, int, int, error)
-
-	// Status
-	Render() (interface{}, interface{}, error)
-	RenderFull() (*api.InstanceFull, interface{}, error)
-	RenderState() (*api.InstanceState, error)
-	IsRunning() bool
-	IsFrozen() bool
-	IsEphemeral() bool
-	IsSnapshot() bool
-	IsStateful() bool
-
-	// Hooks
-	DeviceEventHandler(*device.RunConfig) error
-
-	// Properties
-	Id() int
-	Location() string
-	Project() string
-	Name() string
-	Type() instance.Type
-	Description() string
-	Architecture() int
-	CreationDate() time.Time
-	LastUsedDate() time.Time
-	ExpandedConfig() map[string]string
-	ExpandedDevices() config.Devices
-	LocalConfig() map[string]string
-	LocalDevices() config.Devices
-	Profiles() []string
-	InitPID() int
-	State() string
-	ExpiryDate() time.Time
-
-	// Paths
-	Path() string
-	RootfsPath() string
-	TemplatesPath() string
-	StatePath() string
-	LogFilePath() string
-	ConsoleBufferLogPath() string
-	LogPath() string
-	DevicesPath() string
-
-	// Storage
-	StoragePool() (string, error)
-
-	// Progress reporting
-
-	SetOperation(op *operation)
-
-	// FIXME: Those should be internal functions
-	// Needed for migration for now.
-	StorageStart() (bool, error)
-	StorageStop() (bool, error)
-	Storage() storage
-	TemplateApply(trigger string) error
-	DaemonState() *state.State
-}

From f9109b64f1905b75942164ebe1501daef5827e0a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:10:23 +0100
Subject: [PATCH 15/72] lxd/container/lxc: Moved to instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_lxc.go | 7106 ------------------------------------------
 1 file changed, 7106 deletions(-)
 delete mode 100644 lxd/container_lxc.go

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
deleted file mode 100644
index 67e2e5f2d6..0000000000
--- a/lxd/container_lxc.go
+++ /dev/null
@@ -1,7106 +0,0 @@
-package main
-
-import (
-	"bufio"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net"
-	"os"
-	"os/exec"
-	"path"
-	"path/filepath"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"syscall"
-	"time"
-
-	"github.com/flosch/pongo2"
-	"github.com/pkg/errors"
-	"golang.org/x/sys/unix"
-	lxc "gopkg.in/lxc/go-lxc.v2"
-	yaml "gopkg.in/yaml.v2"
-
-	"github.com/lxc/lxd/lxd/cluster"
-	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/db/query"
-	"github.com/lxc/lxd/lxd/device"
-	"github.com/lxc/lxd/lxd/device/config"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/maas"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/lxd/state"
-	driver "github.com/lxc/lxd/lxd/storage"
-	"github.com/lxc/lxd/lxd/template"
-	"github.com/lxc/lxd/lxd/util"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/containerwriter"
-	"github.com/lxc/lxd/shared/idmap"
-	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/netutils"
-	"github.com/lxc/lxd/shared/osarch"
-	"github.com/lxc/lxd/shared/units"
-
-	log "github.com/lxc/lxd/shared/log15"
-)
-
-// Operation locking
-type lxcContainerOperation struct {
-	action    string
-	chanDone  chan error
-	chanReset chan bool
-	err       error
-	id        int
-	reusable  bool
-}
-
-func (op *lxcContainerOperation) Create(id int, action string, reusable bool) *lxcContainerOperation {
-	op.id = id
-	op.action = action
-	op.reusable = reusable
-	op.chanDone = make(chan error, 0)
-	op.chanReset = make(chan bool, 0)
-
-	go func(op *lxcContainerOperation) {
-		for {
-			select {
-			case <-op.chanReset:
-				continue
-			case <-time.After(time.Second * 30):
-				op.Done(fmt.Errorf("Container %s operation timed out after 30 seconds", op.action))
-				return
-			}
-		}
-	}(op)
-
-	return op
-}
-
-func (op *lxcContainerOperation) Reset() error {
-	if !op.reusable {
-		return fmt.Errorf("Can't reset a non-reusable operation")
-	}
-
-	op.chanReset <- true
-	return nil
-}
-
-func (op *lxcContainerOperation) Wait() error {
-	<-op.chanDone
-
-	return op.err
-}
-
-func (op *lxcContainerOperation) Done(err error) {
-	lxcContainerOperationsLock.Lock()
-	defer lxcContainerOperationsLock.Unlock()
-
-	// Check if already done
-	runningOp, ok := lxcContainerOperations[op.id]
-	if !ok || runningOp != op {
-		return
-	}
-
-	op.err = err
-	close(op.chanDone)
-
-	delete(lxcContainerOperations, op.id)
-}
-
-var lxcContainerOperationsLock sync.Mutex
-var lxcContainerOperations map[int]*lxcContainerOperation = make(map[int]*lxcContainerOperation)
-
-// Helper functions
-func lxcSetConfigItem(c *lxc.Container, key string, value string) error {
-	if c == nil {
-		return fmt.Errorf("Uninitialized go-lxc struct")
-	}
-
-	if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
-		switch key {
-		case "lxc.uts.name":
-			key = "lxc.utsname"
-		case "lxc.pty.max":
-			key = "lxc.pts"
-		case "lxc.tty.dir":
-			key = "lxc.devttydir"
-		case "lxc.tty.max":
-			key = "lxc.tty"
-		case "lxc.apparmor.profile":
-			key = "lxc.aa_profile"
-		case "lxc.apparmor.allow_incomplete":
-			key = "lxc.aa_allow_incomplete"
-		case "lxc.selinux.context":
-			key = "lxc.se_context"
-		case "lxc.mount.fstab":
-			key = "lxc.mount"
-		case "lxc.console.path":
-			key = "lxc.console"
-		case "lxc.seccomp.profile":
-			key = "lxc.seccomp"
-		case "lxc.signal.halt":
-			key = "lxc.haltsignal"
-		case "lxc.signal.reboot":
-			key = "lxc.rebootsignal"
-		case "lxc.signal.stop":
-			key = "lxc.stopsignal"
-		case "lxc.log.syslog":
-			key = "lxc.syslog"
-		case "lxc.log.level":
-			key = "lxc.loglevel"
-		case "lxc.log.file":
-			key = "lxc.logfile"
-		case "lxc.init.cmd":
-			key = "lxc.init_cmd"
-		case "lxc.init.uid":
-			key = "lxc.init_uid"
-		case "lxc.init.gid":
-			key = "lxc.init_gid"
-		case "lxc.idmap":
-			key = "lxc.id_map"
-		}
-	}
-
-	if strings.HasPrefix(key, "lxc.prlimit.") {
-		if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
-			return fmt.Errorf(`Process limits require liblxc >= 2.1`)
-		}
-	}
-
-	err := c.SetConfigItem(key, value)
-	if err != nil {
-		return fmt.Errorf("Failed to set LXC config: %s=%s", key, value)
-	}
-
-	return nil
-}
-
-func lxcParseRawLXC(line string) (string, string, error) {
-	// Ignore empty lines
-	if len(line) == 0 {
-		return "", "", nil
-	}
-
-	// Skip whitespace {"\t", " "}
-	line = strings.TrimLeft(line, "\t ")
-
-	// Ignore comments
-	if strings.HasPrefix(line, "#") {
-		return "", "", nil
-	}
-
-	// Ensure the format is valid
-	membs := strings.SplitN(line, "=", 2)
-	if len(membs) != 2 {
-		return "", "", fmt.Errorf("Invalid raw.lxc line: %s", line)
-	}
-
-	key := strings.ToLower(strings.Trim(membs[0], " \t"))
-	val := strings.Trim(membs[1], " \t")
-	return key, val, nil
-}
-
-func lxcSupportSeccompNotify(state *state.State) bool {
-	if !state.OS.SeccompListener {
-		return false
-	}
-
-	if !state.OS.LXCFeatures["seccomp_notify"] {
-		return false
-	}
-
-	c, err := lxc.NewContainer("test-seccomp", state.OS.LxcPath)
-	if err != nil {
-		return false
-	}
-
-	err = c.SetConfigItem("lxc.seccomp.notify.proxy", fmt.Sprintf("unix:%s", shared.VarPath("seccomp.socket")))
-	if err != nil {
-		return false
-	}
-
-	c.Release()
-	return true
-}
-
-func lxcValidConfig(rawLxc string) error {
-	for _, line := range strings.Split(rawLxc, "\n") {
-		key, _, err := lxcParseRawLXC(line)
-		if err != nil {
-			return err
-		}
-
-		if key == "" {
-			continue
-		}
-
-		unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
-		if shared.IsTrue(unprivOnly) {
-			if key == "lxc.idmap" || key == "lxc.id_map" || key == "lxc.include" {
-				return fmt.Errorf("%s can't be set in raw.lxc as LXD was configured to only allow unprivileged containers", key)
-			}
-		}
-
-		// Blacklist some keys
-		if key == "lxc.logfile" || key == "lxc.log.file" {
-			return fmt.Errorf("Setting lxc.logfile is not allowed")
-		}
-
-		if key == "lxc.syslog" || key == "lxc.log.syslog" {
-			return fmt.Errorf("Setting lxc.log.syslog is not allowed")
-		}
-
-		if key == "lxc.ephemeral" {
-			return fmt.Errorf("Setting lxc.ephemeral is not allowed")
-		}
-
-		if strings.HasPrefix(key, "lxc.prlimit.") {
-			return fmt.Errorf(`Process limits should be set via ` +
-				`"limits.kernel.[limit name]" and not ` +
-				`directly via "lxc.prlimit.[limit name]"`)
-		}
-
-		networkKeyPrefix := "lxc.net."
-		if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
-			networkKeyPrefix = "lxc.network."
-		}
-
-		if strings.HasPrefix(key, networkKeyPrefix) {
-			fields := strings.Split(key, ".")
-
-			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
-				// lxc.network.X.ipv4 or lxc.network.X.ipv6
-				if len(fields) == 4 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) {
-					continue
-				}
-
-				// lxc.network.X.ipv4.gateway or lxc.network.X.ipv6.gateway
-				if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "gateway" {
-					continue
-				}
-			} else {
-				// lxc.net.X.ipv4.address or lxc.net.X.ipv6.address
-				if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "address" {
-					continue
-				}
-
-				// lxc.net.X.ipv4.gateway or lxc.net.X.ipv6.gateway
-				if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "gateway" {
-					continue
-				}
-			}
-
-			return fmt.Errorf("Only interface-specific ipv4/ipv6 %s keys are allowed", networkKeyPrefix)
-		}
-	}
-
-	return nil
-}
-
-func lxcStatusCode(state lxc.State) api.StatusCode {
-	return map[int]api.StatusCode{
-		1: api.Stopped,
-		2: api.Starting,
-		3: api.Running,
-		4: api.Stopping,
-		5: api.Aborting,
-		6: api.Freezing,
-		7: api.Frozen,
-		8: api.Thawed,
-		9: api.Error,
-	}[int(state)]
-}
-
-// Loader functions
-func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error) {
-	// Create the container struct
-	c := &containerLXC{
-		state:        s,
-		id:           args.ID,
-		project:      args.Project,
-		name:         args.Name,
-		node:         args.Node,
-		description:  args.Description,
-		ephemeral:    args.Ephemeral,
-		architecture: args.Architecture,
-		dbType:       args.Type,
-		snapshot:     args.Snapshot,
-		stateful:     args.Stateful,
-		creationDate: args.CreationDate,
-		lastUsedDate: args.LastUsedDate,
-		profiles:     args.Profiles,
-		localConfig:  args.Config,
-		localDevices: args.Devices,
-		expiryDate:   args.ExpiryDate,
-	}
-
-	// Cleanup the zero values
-	if c.expiryDate.IsZero() {
-		c.expiryDate = time.Time{}
-	}
-
-	if c.creationDate.IsZero() {
-		c.creationDate = time.Time{}
-	}
-
-	if c.lastUsedDate.IsZero() {
-		c.lastUsedDate = time.Time{}
-	}
-
-	ctxMap := log.Ctx{
-		"project":   args.Project,
-		"name":      c.name,
-		"ephemeral": c.ephemeral,
-	}
-
-	logger.Info("Creating container", ctxMap)
-
-	// Load the config
-	err := c.init()
-	if err != nil {
-		c.Delete()
-		logger.Error("Failed creating container", ctxMap)
-		return nil, err
-	}
-
-	// Validate expanded config
-	err = containerValidConfig(s.OS, c.expandedConfig, false, true)
-	if err != nil {
-		c.Delete()
-		logger.Error("Failed creating container", ctxMap)
-		return nil, err
-	}
-
-	err = containerValidDevices(s, s.Cluster, c.Name(), c.expandedDevices, true)
-	if err != nil {
-		c.Delete()
-		logger.Error("Failed creating container", ctxMap)
-		return nil, errors.Wrap(err, "Invalid devices")
-	}
-
-	// Retrieve the container's storage pool
-	_, rootDiskDevice, err := shared.GetRootDiskDevice(c.expandedDevices.CloneNative())
-	if err != nil {
-		c.Delete()
-		return nil, err
-	}
-
-	if rootDiskDevice["pool"] == "" {
-		c.Delete()
-		return nil, fmt.Errorf("The container's root device is missing the pool property")
-	}
-
-	storagePool := rootDiskDevice["pool"]
-
-	// Get the storage pool ID for the container
-	poolID, pool, err := s.Cluster.StoragePoolGet(storagePool)
-	if err != nil {
-		c.Delete()
-		return nil, err
-	}
-
-	// Fill in any default volume config
-	volumeConfig := map[string]string{}
-	err = storageVolumeFillDefault(storagePool, volumeConfig, pool)
-	if err != nil {
-		c.Delete()
-		return nil, err
-	}
-
-	// Create a new database entry for the container's storage volume
-	_, err = s.Cluster.StoragePoolVolumeCreate(args.Project, args.Name, "", storagePoolVolumeTypeContainer, false, poolID, volumeConfig)
-	if err != nil {
-		c.Delete()
-		return nil, err
-	}
-
-	// Initialize the container storage
-	cStorage, err := storagePoolVolumeContainerCreateInit(s, args.Project, storagePool, args.Name)
-	if err != nil {
-		c.Delete()
-		s.Cluster.StoragePoolVolumeDelete(args.Project, args.Name, storagePoolVolumeTypeContainer, poolID)
-		logger.Error("Failed to initialize container storage", ctxMap)
-		return nil, err
-	}
-	c.storage = cStorage
-
-	// Setup initial idmap config
-	var idmap *idmap.IdmapSet
-	base := int64(0)
-	if !c.IsPrivileged() {
-		idmap, base, err = findIdmap(
-			s,
-			args.Name,
-			c.expandedConfig["security.idmap.isolated"],
-			c.expandedConfig["security.idmap.base"],
-			c.expandedConfig["security.idmap.size"],
-			c.expandedConfig["raw.idmap"],
-		)
-
-		if err != nil {
-			c.Delete()
-			logger.Error("Failed creating container", ctxMap)
-			return nil, err
-		}
-	}
-
-	var jsonIdmap string
-	if idmap != nil {
-		idmapBytes, err := json.Marshal(idmap.Idmap)
-		if err != nil {
-			c.Delete()
-			logger.Error("Failed creating container", ctxMap)
-			return nil, err
-		}
-		jsonIdmap = string(idmapBytes)
-	} else {
-		jsonIdmap = "[]"
-	}
-
-	err = c.VolatileSet(map[string]string{"volatile.idmap.next": jsonIdmap})
-	if err != nil {
-		c.Delete()
-		logger.Error("Failed creating container", ctxMap)
-		return nil, err
-	}
-
-	err = c.VolatileSet(map[string]string{"volatile.idmap.base": fmt.Sprintf("%v", base)})
-	if err != nil {
-		c.Delete()
-		logger.Error("Failed creating container", ctxMap)
-		return nil, err
-	}
-
-	// Invalidate the idmap cache
-	c.idmapset = nil
-
-	// Set last_state if not currently set
-	if c.localConfig["volatile.last_state.idmap"] == "" {
-		err = c.VolatileSet(map[string]string{"volatile.last_state.idmap": "[]"})
-		if err != nil {
-			c.Delete()
-			logger.Error("Failed creating container", ctxMap)
-			return nil, err
-		}
-	}
-
-	// Re-run init to update the idmap
-	err = c.init()
-	if err != nil {
-		c.Delete()
-		logger.Error("Failed creating container", ctxMap)
-		return nil, err
-	}
-
-	if !c.IsSnapshot() {
-		// Update MAAS
-		err = c.maasUpdate(nil)
-		if err != nil {
-			c.Delete()
-			logger.Error("Failed creating container", ctxMap)
-			return nil, err
-		}
-
-		// Add devices to container.
-		for k, m := range c.expandedDevices {
-			err = c.deviceAdd(k, m)
-			if err != nil && err != device.ErrUnsupportedDevType {
-				c.Delete()
-				return nil, errors.Wrapf(err, "Failed to add device '%s'", k)
-			}
-		}
-	}
-
-	logger.Info("Created container", ctxMap)
-	eventSendLifecycle(c.project, "container-created",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
-
-	return c, nil
-}
-
-func containerLXCLoad(s *state.State, args db.ContainerArgs, profiles []api.Profile) (container, error) {
-	// Create the container struct
-	c := containerLXCInstantiate(s, args)
-
-	// Setup finalizer
-	runtime.SetFinalizer(c, containerLXCUnload)
-
-	// Expand config and devices
-	err := c.expandConfig(profiles)
-	if err != nil {
-		return nil, err
-	}
-
-	err = c.expandDevices(profiles)
-	if err != nil {
-		return nil, err
-	}
-
-	return c, nil
-}
-
-// Unload is called by the garbage collector
-func containerLXCUnload(c *containerLXC) {
-	runtime.SetFinalizer(c, nil)
-	if c.c != nil {
-		c.c.Release()
-		c.c = nil
-	}
-}
-
-// Create a container struct without initializing it.
-func containerLXCInstantiate(s *state.State, args db.ContainerArgs) *containerLXC {
-	c := &containerLXC{
-		state:        s,
-		id:           args.ID,
-		project:      args.Project,
-		name:         args.Name,
-		description:  args.Description,
-		ephemeral:    args.Ephemeral,
-		architecture: args.Architecture,
-		dbType:       args.Type,
-		snapshot:     args.Snapshot,
-		creationDate: args.CreationDate,
-		lastUsedDate: args.LastUsedDate,
-		profiles:     args.Profiles,
-		localConfig:  args.Config,
-		localDevices: args.Devices,
-		stateful:     args.Stateful,
-		node:         args.Node,
-		expiryDate:   args.ExpiryDate,
-	}
-
-	// Cleanup the zero values
-	if c.expiryDate.IsZero() {
-		c.expiryDate = time.Time{}
-	}
-
-	if c.creationDate.IsZero() {
-		c.creationDate = time.Time{}
-	}
-
-	if c.lastUsedDate.IsZero() {
-		c.lastUsedDate = time.Time{}
-	}
-
-	return c
-}
-
-// The LXC container driver
-type containerLXC struct {
-	// Properties
-	architecture int
-	dbType       instance.Type
-	snapshot     bool
-	creationDate time.Time
-	lastUsedDate time.Time
-	ephemeral    bool
-	id           int
-	project      string
-	name         string
-	description  string
-	stateful     bool
-
-	// Config
-	expandedConfig  map[string]string
-	expandedDevices config.Devices
-	fromHook        bool
-	localConfig     map[string]string
-	localDevices    config.Devices
-	profiles        []string
-
-	// Cache
-	c       *lxc.Container
-	cConfig bool
-
-	state    *state.State
-	idmapset *idmap.IdmapSet
-
-	// Storage
-	storage storage
-
-	// Clustering
-	node string
-
-	// Progress tracking
-	op *operation
-
-	expiryDate time.Time
-}
-
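-// Type returns the instance type of the container as stored in the database.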
-func (c *containerLXC) Type() instance.Type {
-	return c.dbType
-}
-
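-// createOperation registers a new operation for the container, reusing an existing reusable
-// operation when reuse is set, and fails if a different operation is already running.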
-func (c *containerLXC) createOperation(action string, reusable bool, reuse bool) (*lxcContainerOperation, error) {
-	op, _ := c.getOperation("")
-	if op != nil {
-		if reuse && op.reusable {
-			op.Reset()
-			return op, nil
-		}
-
-		return nil, fmt.Errorf("Container is busy running a %s operation", op.action)
-	}
-
-	lxcContainerOperationsLock.Lock()
-	defer lxcContainerOperationsLock.Unlock()
-
-	op = &lxcContainerOperation{}
-	op.Create(c.id, action, reusable)
-	lxcContainerOperations[c.id] = op
-
-	return lxcContainerOperations[c.id], nil
-}
-
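-// getOperation returns the container's in-progress operation, optionally verifying that it
-// matches the given action.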
-func (c *containerLXC) getOperation(action string) (*lxcContainerOperation, error) {
-	lxcContainerOperationsLock.Lock()
-	defer lxcContainerOperationsLock.Unlock()
-
-	op := lxcContainerOperations[c.id]
-
-	if op == nil {
-		return nil, fmt.Errorf("No running %s container operation", action)
-	}
-
-	if action != "" && op.action != action {
-		return nil, fmt.Errorf("Container is running a %s operation, not a %s operation", op.action, action)
-	}
-
-	return op, nil
-}
-
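-// waitOperation blocks until any in-progress container operation has completed.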
-func (c *containerLXC) waitOperation() error {
-	op, _ := c.getOperation("")
-	if op != nil {
-		err := op.Wait()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
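-// idmapSize returns the idmap size to use: the configured size, a default of 65536 for
-// isolated containers, or otherwise the host map range.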
-func idmapSize(state *state.State, isolatedStr string, size string) (int64, error) {
-	isolated := false
-	if shared.IsTrue(isolatedStr) {
-		isolated = true
-	}
-
-	var idMapSize int64
-	if size == "" || size == "auto" {
-		if isolated {
-			idMapSize = 65536
-		} else {
-			if len(state.OS.IdmapSet.Idmap) != 2 {
-				return 0, fmt.Errorf("bad initial idmap: %v", state.OS.IdmapSet)
-			}
-
-			idMapSize = state.OS.IdmapSet.Idmap[0].Maprange
-		}
-	} else {
-		size, err := strconv.ParseInt(size, 10, 64)
-		if err != nil {
-			return 0, err
-		}
-
-		idMapSize = size
-	}
-
-	return idMapSize, nil
-}
-
-var idmapLock sync.Mutex
-
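-// parseRawIdmap parses a raw.idmap value, one "<both|uid|gid> <host range> <container range>"
-// entry per line, into a list of idmap entries.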
-func parseRawIdmap(value string) ([]idmap.IdmapEntry, error) {
-	getRange := func(r string) (int64, int64, error) {
-		entries := strings.Split(r, "-")
-		if len(entries) > 2 {
-			return -1, -1, fmt.Errorf("invalid raw.idmap range %s", r)
-		}
-
-		base, err := strconv.ParseInt(entries[0], 10, 64)
-		if err != nil {
-			return -1, -1, err
-		}
-
-		size := int64(1)
-		if len(entries) > 1 {
-			size, err = strconv.ParseInt(entries[1], 10, 64)
-			if err != nil {
-				return -1, -1, err
-			}
-
-			size -= base
-			size += 1
-		}
-
-		return base, size, nil
-	}
-
-	ret := idmap.IdmapSet{}
-
-	for _, line := range strings.Split(value, "\n") {
-		if line == "" {
-			continue
-		}
-
-		entries := strings.Split(line, " ")
-		if len(entries) != 3 {
-			return nil, fmt.Errorf("invalid raw.idmap line %s", line)
-		}
-
-		outsideBase, outsideSize, err := getRange(entries[1])
-		if err != nil {
-			return nil, err
-		}
-
-		insideBase, insideSize, err := getRange(entries[2])
-		if err != nil {
-			return nil, err
-		}
-
-		if insideSize != outsideSize {
-			return nil, fmt.Errorf("idmap ranges of different sizes %s", line)
-		}
-
-		entry := idmap.IdmapEntry{
-			Hostid:   outsideBase,
-			Nsid:     insideBase,
-			Maprange: insideSize,
-		}
-
-		switch entries[0] {
-		case "both":
-			entry.Isuid = true
-			entry.Isgid = true
-			err := ret.AddSafe(entry)
-			if err != nil {
-				return nil, err
-			}
-		case "uid":
-			entry.Isuid = true
-			err := ret.AddSafe(entry)
-			if err != nil {
-				return nil, err
-			}
-		case "gid":
-			entry.Isgid = true
-			err := ret.AddSafe(entry)
-			if err != nil {
-				return nil, err
-			}
-		default:
-			return nil, fmt.Errorf("invalid raw.idmap type %s", line)
-		}
-	}
-
-	return ret.Idmap, nil
-}
-
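-// findIdmap returns the idmap set and base host ID to use for the container, allocating a
-// free isolated range from the host map when required.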
-func findIdmap(state *state.State, cName string, isolatedStr string, configBase string, configSize string, rawIdmap string) (*idmap.IdmapSet, int64, error) {
-	isolated := false
-	if shared.IsTrue(isolatedStr) {
-		isolated = true
-	}
-
-	rawMaps, err := parseRawIdmap(rawIdmap)
-	if err != nil {
-		return nil, 0, err
-	}
-
-	if !isolated {
-		newIdmapset := idmap.IdmapSet{Idmap: make([]idmap.IdmapEntry, len(state.OS.IdmapSet.Idmap))}
-		copy(newIdmapset.Idmap, state.OS.IdmapSet.Idmap)
-
-		for _, ent := range rawMaps {
-			err := newIdmapset.AddSafe(ent)
-			if err != nil && err == idmap.ErrHostIdIsSubId {
-				return nil, 0, err
-			}
-		}
-
-		return &newIdmapset, 0, nil
-	}
-
-	size, err := idmapSize(state, isolatedStr, configSize)
-	if err != nil {
-		return nil, 0, err
-	}
-
-	mkIdmap := func(offset int64, size int64) (*idmap.IdmapSet, error) {
-		set := &idmap.IdmapSet{Idmap: []idmap.IdmapEntry{
-			{Isuid: true, Nsid: 0, Hostid: offset, Maprange: size},
-			{Isgid: true, Nsid: 0, Hostid: offset, Maprange: size},
-		}}
-
-		for _, ent := range rawMaps {
-			err := set.AddSafe(ent)
-			if err != nil && err == idmap.ErrHostIdIsSubId {
-				return nil, err
-			}
-		}
-
-		return set, nil
-	}
-
-	if configBase != "" {
-		offset, err := strconv.ParseInt(configBase, 10, 64)
-		if err != nil {
-			return nil, 0, err
-		}
-
-		set, err := mkIdmap(offset, size)
-		if err != nil && err == idmap.ErrHostIdIsSubId {
-			return nil, 0, err
-		}
-
-		return set, offset, nil
-	}
-
-	idmapLock.Lock()
-	defer idmapLock.Unlock()
-
-	cts, err := instanceLoadAll(state)
-	if err != nil {
-		return nil, 0, err
-	}
-
-	offset := state.OS.IdmapSet.Idmap[0].Hostid + 65536
-
-	mapentries := idmap.ByHostid{}
-	for _, container := range cts {
-		name := container.Name()
-
-		/* Don't change our map Just Because. */
-		if name == cName {
-			continue
-		}
-
-		if container.IsPrivileged() {
-			continue
-		}
-
-		if !shared.IsTrue(container.ExpandedConfig()["security.idmap.isolated"]) {
-			continue
-		}
-
-		cBase := int64(0)
-		if container.ExpandedConfig()["volatile.idmap.base"] != "" {
-			cBase, err = strconv.ParseInt(container.ExpandedConfig()["volatile.idmap.base"], 10, 64)
-			if err != nil {
-				return nil, 0, err
-			}
-		}
-
-		cSize, err := idmapSize(state, container.ExpandedConfig()["security.idmap.isolated"], container.ExpandedConfig()["security.idmap.size"])
-		if err != nil {
-			return nil, 0, err
-		}
-
-		mapentries = append(mapentries, &idmap.IdmapEntry{Hostid: int64(cBase), Maprange: cSize})
-	}
-
-	sort.Sort(mapentries)
-
-	for i := range mapentries {
-		if i == 0 {
-			if mapentries[0].Hostid < offset+size {
-				offset = mapentries[0].Hostid + mapentries[0].Maprange
-				continue
-			}
-
-			set, err := mkIdmap(offset, size)
-			if err != nil && err == idmap.ErrHostIdIsSubId {
-				return nil, 0, err
-			}
-
-			return set, offset, nil
-		}
-
-		if mapentries[i-1].Hostid+mapentries[i-1].Maprange > offset {
-			offset = mapentries[i-1].Hostid + mapentries[i-1].Maprange
-			continue
-		}
-
-		offset = mapentries[i-1].Hostid + mapentries[i-1].Maprange
-		if offset+size < mapentries[i].Hostid {
-			set, err := mkIdmap(offset, size)
-			if err != nil && err == idmap.ErrHostIdIsSubId {
-				return nil, 0, err
-			}
-
-			return set, offset, nil
-		}
-		offset = mapentries[i].Hostid + mapentries[i].Maprange
-	}
-
-	if offset+size < state.OS.IdmapSet.Idmap[0].Hostid+state.OS.IdmapSet.Idmap[0].Maprange {
-		set, err := mkIdmap(offset, size)
-		if err != nil && err == idmap.ErrHostIdIsSubId {
-			return nil, 0, err
-		}
-
-		return set, offset, nil
-	}
-
-	return nil, 0, fmt.Errorf("Not enough uid/gid available for the container")
-}
-
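-// init recomputes the container's expanded config and expanded device list.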
-func (c *containerLXC) init() error {
-	// Compute the expanded config and device list
-	err := c.expandConfig(nil)
-	if err != nil {
-		return err
-	}
-
-	err = c.expandDevices(nil)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
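-// initLXC initialises the go-lxc struct for the container; when config is true the full LXC
-// configuration is generated rather than a lightweight handle.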
-func (c *containerLXC) initLXC(config bool) error {
-	// No need to go through all that for snapshots
-	if c.IsSnapshot() {
-		return nil
-	}
-
-	// Check if being called from a hook
-	if c.fromHook {
-		return fmt.Errorf("You can't use go-lxc from inside a LXC hook")
-	}
-
-	// Check if already initialized
-	if c.c != nil {
-		if !config || c.cConfig {
-			return nil
-		}
-	}
-
-	// Load the go-lxc struct
-	cname := project.Prefix(c.Project(), c.Name())
-	cc, err := lxc.NewContainer(cname, c.state.OS.LxcPath)
-	if err != nil {
-		return err
-	}
-
-	freeContainer := true
-	defer func() {
-		if freeContainer {
-			cc.Release()
-		}
-	}()
-
-	// Setup logging
-	logfile := c.LogFilePath()
-	err = lxcSetConfigItem(cc, "lxc.log.file", logfile)
-	if err != nil {
-		return err
-	}
-
-	logLevel := "warn"
-	if debug {
-		logLevel = "trace"
-	} else if verbose {
-		logLevel = "info"
-	}
-
-	err = lxcSetConfigItem(cc, "lxc.log.level", logLevel)
-	if err != nil {
-		return err
-	}
-
-	if util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
-		// Default size log buffer
-		err = lxcSetConfigItem(cc, "lxc.console.buffer.size", "auto")
-		if err != nil {
-			return err
-		}
-
-		err = lxcSetConfigItem(cc, "lxc.console.size", "auto")
-		if err != nil {
-			return err
-		}
-
-		// File to dump ringbuffer contents to when requested or
-		// container shutdown.
-		consoleBufferLogFile := c.ConsoleBufferLogPath()
-		err = lxcSetConfigItem(cc, "lxc.console.logfile", consoleBufferLogFile)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Allow for lightweight init
-	c.cConfig = config
-	if !config {
-		if c.c != nil {
-			c.c.Release()
-		}
-
-		c.c = cc
-		freeContainer = false
-		return nil
-	}
-
-	if c.IsPrivileged() {
-		// Base config
-		toDrop := "sys_time sys_module sys_rawio"
-		if !c.state.OS.AppArmorStacking || c.state.OS.AppArmorStacked {
-			toDrop = toDrop + " mac_admin mac_override"
-		}
-
-		err = lxcSetConfigItem(cc, "lxc.cap.drop", toDrop)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Set an appropriate /proc, /sys/ and /sys/fs/cgroup
-	mounts := []string{}
-	if c.IsPrivileged() && !c.state.OS.RunningInUserNS {
-		mounts = append(mounts, "proc:mixed")
-		mounts = append(mounts, "sys:mixed")
-	} else {
-		mounts = append(mounts, "proc:rw")
-		mounts = append(mounts, "sys:rw")
-	}
-
-	if !shared.PathExists("/proc/self/ns/cgroup") {
-		mounts = append(mounts, "cgroup:mixed")
-	}
-
-	err = lxcSetConfigItem(cc, "lxc.mount.auto", strings.Join(mounts, " "))
-	if err != nil {
-		return err
-	}
-
-	err = lxcSetConfigItem(cc, "lxc.autodev", "1")
-	if err != nil {
-		return err
-	}
-
-	err = lxcSetConfigItem(cc, "lxc.pty.max", "1024")
-	if err != nil {
-		return err
-	}
-
-	bindMounts := []string{
-		"/dev/fuse",
-		"/dev/net/tun",
-		"/proc/sys/fs/binfmt_misc",
-		"/sys/firmware/efi/efivars",
-		"/sys/fs/fuse/connections",
-		"/sys/fs/pstore",
-		"/sys/kernel/debug",
-		"/sys/kernel/security"}
-
-	if c.IsPrivileged() && !c.state.OS.RunningInUserNS {
-		err = lxcSetConfigItem(cc, "lxc.mount.entry", "mqueue dev/mqueue mqueue rw,relatime,create=dir,optional 0 0")
-		if err != nil {
-			return err
-		}
-	} else {
-		bindMounts = append(bindMounts, "/dev/mqueue")
-	}
-
-	for _, mnt := range bindMounts {
-		if !shared.PathExists(mnt) {
-			continue
-		}
-
-		if shared.IsDir(mnt) {
-			err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none rbind,create=dir,optional 0 0", mnt, strings.TrimPrefix(mnt, "/")))
-			if err != nil {
-				return err
-			}
-		} else {
-			err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,create=file,optional 0 0", mnt, strings.TrimPrefix(mnt, "/")))
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// For lxcfs
-	templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG")
-	if templateConfDir == "" {
-		templateConfDir = "/usr/share/lxc/config"
-	}
-
-	if shared.PathExists(fmt.Sprintf("%s/common.conf.d/", templateConfDir)) {
-		err = lxcSetConfigItem(cc, "lxc.include", fmt.Sprintf("%s/common.conf.d/", templateConfDir))
-		if err != nil {
-			return err
-		}
-	}
-
-	// Configure devices cgroup
-	if c.IsPrivileged() && !c.state.OS.RunningInUserNS && c.state.OS.CGroupDevicesController {
-		err = lxcSetConfigItem(cc, "lxc.cgroup.devices.deny", "a")
-		if err != nil {
-			return err
-		}
-
-		devices := []string{
-			"b *:* m",      // Allow mknod of block devices
-			"c *:* m",      // Allow mknod of char devices
-			"c 136:* rwm",  // /dev/pts devices
-			"c 1:3 rwm",    // /dev/null
-			"c 1:5 rwm",    // /dev/zero
-			"c 1:7 rwm",    // /dev/full
-			"c 1:8 rwm",    // /dev/random
-			"c 1:9 rwm",    // /dev/urandom
-			"c 5:0 rwm",    // /dev/tty
-			"c 5:1 rwm",    // /dev/console
-			"c 5:2 rwm",    // /dev/ptmx
-			"c 10:229 rwm", // /dev/fuse
-			"c 10:200 rwm", // /dev/net/tun
-		}
-
-		for _, dev := range devices {
-			err = lxcSetConfigItem(cc, "lxc.cgroup.devices.allow", dev)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	if c.IsNesting() {
-		/*
-		 * mount extra /proc and /sys to work around kernel
-		 * restrictions on remounting them when covered
-		 */
-		err = lxcSetConfigItem(cc, "lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional 0 0")
-		if err != nil {
-			return err
-		}
-
-		err = lxcSetConfigItem(cc, "lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional 0 0")
-		if err != nil {
-			return err
-		}
-	}
-
-	// Setup architecture
-	personality, err := osarch.ArchitecturePersonality(c.architecture)
-	if err != nil {
-		personality, err = osarch.ArchitecturePersonality(c.state.OS.Architectures[0])
-		if err != nil {
-			return err
-		}
-	}
-
-	err = lxcSetConfigItem(cc, "lxc.arch", personality)
-	if err != nil {
-		return err
-	}
-
-	// Setup the hooks
-	err = lxcSetConfigItem(cc, "lxc.hook.version", "1")
-	if err != nil {
-		return err
-	}
-
-	err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("/proc/%d/exe callhook %s %d start", os.Getpid(), shared.VarPath(""), c.id))
-	if err != nil {
-		return err
-	}
-
-	err = lxcSetConfigItem(cc, "lxc.hook.stop", fmt.Sprintf("%s callhook %s %d stopns", c.state.OS.ExecPath, shared.VarPath(""), c.id))
-	if err != nil {
-		return err
-	}
-
-	err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %d stop", c.state.OS.ExecPath, shared.VarPath(""), c.id))
-	if err != nil {
-		return err
-	}
-
-	// Setup the console
-	err = lxcSetConfigItem(cc, "lxc.tty.max", "0")
-	if err != nil {
-		return err
-	}
-
-	// Setup the hostname
-	err = lxcSetConfigItem(cc, "lxc.uts.name", c.Name())
-	if err != nil {
-		return err
-	}
-
-	// Setup devlxd
-	if c.expandedConfig["security.devlxd"] == "" || shared.IsTrue(c.expandedConfig["security.devlxd"]) {
-		err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/lxd none bind,create=dir 0 0", shared.VarPath("devlxd")))
-		if err != nil {
-			return err
-		}
-	}
-
-	// Setup AppArmor
-	if c.state.OS.AppArmorAvailable {
-		if c.state.OS.AppArmorConfined || !c.state.OS.AppArmorAdmin {
-			// If confined but otherwise able to use AppArmor, use our own profile
-			curProfile := util.AppArmorProfile()
-			curProfile = strings.TrimSuffix(curProfile, " (enforce)")
-			err := lxcSetConfigItem(cc, "lxc.apparmor.profile", curProfile)
-			if err != nil {
-				return err
-			}
-		} else {
-			// If not currently confined, use the container's profile
-			profile := AAProfileFull(c)
-
-			/* In the nesting case, we want to enable the inside
-			 * LXD to load its profile. Unprivileged containers can
-			 * load profiles, but privileged containers cannot, so
-			 * let's not use a namespace so they can fall back to
-			 * the old way of nesting, i.e. using the parent's
-			 * profile.
-			 */
-			if c.state.OS.AppArmorStacking && !c.state.OS.AppArmorStacked {
-				profile = fmt.Sprintf("%s//&:%s:", profile, AANamespace(c))
-			}
-
-			err := lxcSetConfigItem(cc, "lxc.apparmor.profile", profile)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Setup Seccomp if necessary
-	if seccompContainerNeedsPolicy(c) {
-		err = lxcSetConfigItem(cc, "lxc.seccomp.profile", SeccompProfilePath(c))
-		if err != nil {
-			return err
-		}
-
-		// Setup notification socket
-		// System requirement errors are handled during policy generation instead of here
-		ok, err := seccompContainerNeedsIntercept(c)
-		if err == nil && ok {
-			err = lxcSetConfigItem(cc, "lxc.seccomp.notify.proxy", fmt.Sprintf("unix:%s", shared.VarPath("seccomp.socket")))
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Setup idmap
-	idmapset, err := c.NextIdmap()
-	if err != nil {
-		return err
-	}
-
-	if idmapset != nil {
-		lines := idmapset.ToLxcString()
-		for _, line := range lines {
-			err := lxcSetConfigItem(cc, "lxc.idmap", line)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Setup environment
-	for k, v := range c.expandedConfig {
-		if strings.HasPrefix(k, "environment.") {
-			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Setup NVIDIA runtime
-	if shared.IsTrue(c.expandedConfig["nvidia.runtime"]) {
-		hookDir := os.Getenv("LXD_LXC_HOOK")
-		if hookDir == "" {
-			hookDir = "/usr/share/lxc/hooks"
-		}
-
-		hookPath := filepath.Join(hookDir, "nvidia")
-		if !shared.PathExists(hookPath) {
-			return fmt.Errorf("The NVIDIA LXC hook couldn't be found")
-		}
-
-		_, err := exec.LookPath("nvidia-container-cli")
-		if err != nil {
-			return fmt.Errorf("The NVIDIA container tools couldn't be found")
-		}
-
-		err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_VISIBLE_DEVICES=none")
-		if err != nil {
-			return err
-		}
-
-		nvidiaDriver := c.expandedConfig["nvidia.driver.capabilities"]
-		if nvidiaDriver == "" {
-			err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_DRIVER_CAPABILITIES=compute,utility")
-			if err != nil {
-				return err
-			}
-		} else {
-			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_DRIVER_CAPABILITIES=%s", nvidiaDriver))
-			if err != nil {
-				return err
-			}
-		}
-
-		nvidiaRequireCuda := c.expandedConfig["nvidia.require.cuda"]
-		if nvidiaRequireCuda != "" {
-			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_CUDA=%s", nvidiaRequireCuda))
-			if err != nil {
-				return err
-			}
-		}
-
-		nvidiaRequireDriver := c.expandedConfig["nvidia.require.driver"]
-		if nvidiaRequireDriver != "" {
-			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_DRIVER=%s", nvidiaRequireDriver))
-			if err != nil {
-				return err
-			}
-		}
-
-		err = lxcSetConfigItem(cc, "lxc.hook.mount", hookPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Memory limits
-	if c.state.OS.CGroupMemoryController {
-		memory := c.expandedConfig["limits.memory"]
-		memoryEnforce := c.expandedConfig["limits.memory.enforce"]
-		memorySwap := c.expandedConfig["limits.memory.swap"]
-		memorySwapPriority := c.expandedConfig["limits.memory.swap.priority"]
-
-		// Configure the memory limits
-		if memory != "" {
-			var valueInt int64
-			if strings.HasSuffix(memory, "%") {
-				percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64)
-				if err != nil {
-					return err
-				}
-
-				memoryTotal, err := shared.DeviceTotalMemory()
-				if err != nil {
-					return err
-				}
-
-				valueInt = int64((memoryTotal / 100) * percent)
-			} else {
-				valueInt, err = units.ParseByteSizeString(memory)
-				if err != nil {
-					return err
-				}
-			}
-
-			if memoryEnforce == "soft" {
-				err = lxcSetConfigItem(cc, "lxc.cgroup.memory.soft_limit_in_bytes", fmt.Sprintf("%d", valueInt))
-				if err != nil {
-					return err
-				}
-			} else {
-				if c.state.OS.CGroupSwapAccounting && (memorySwap == "" || shared.IsTrue(memorySwap)) {
-					err = lxcSetConfigItem(cc, "lxc.cgroup.memory.limit_in_bytes", fmt.Sprintf("%d", valueInt))
-					if err != nil {
-						return err
-					}
-					err = lxcSetConfigItem(cc, "lxc.cgroup.memory.memsw.limit_in_bytes", fmt.Sprintf("%d", valueInt))
-					if err != nil {
-						return err
-					}
-				} else {
-					err = lxcSetConfigItem(cc, "lxc.cgroup.memory.limit_in_bytes", fmt.Sprintf("%d", valueInt))
-					if err != nil {
-						return err
-					}
-				}
-				// Set soft limit to value 10% less than hard limit
-				err = lxcSetConfigItem(cc, "lxc.cgroup.memory.soft_limit_in_bytes", fmt.Sprintf("%.0f", float64(valueInt)*0.9))
-				if err != nil {
-					return err
-				}
-			}
-		}
-
-		// Configure the swappiness
-		if memorySwap != "" && !shared.IsTrue(memorySwap) {
-			err = lxcSetConfigItem(cc, "lxc.cgroup.memory.swappiness", "0")
-			if err != nil {
-				return err
-			}
-		} else if memorySwapPriority != "" {
-			priority, err := strconv.Atoi(memorySwapPriority)
-			if err != nil {
-				return err
-			}
-
-			err = lxcSetConfigItem(cc, "lxc.cgroup.memory.swappiness", fmt.Sprintf("%d", 60-10+priority))
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// CPU limits
-	cpuPriority := c.expandedConfig["limits.cpu.priority"]
-	cpuAllowance := c.expandedConfig["limits.cpu.allowance"]
-
-	if (cpuPriority != "" || cpuAllowance != "") && c.state.OS.CGroupCPUController {
-		cpuShares, cpuCfsQuota, cpuCfsPeriod, err := deviceParseCPU(cpuAllowance, cpuPriority)
-		if err != nil {
-			return err
-		}
-
-		if cpuShares != "1024" {
-			err = lxcSetConfigItem(cc, "lxc.cgroup.cpu.shares", cpuShares)
-			if err != nil {
-				return err
-			}
-		}
-
-		if cpuCfsPeriod != "-1" {
-			err = lxcSetConfigItem(cc, "lxc.cgroup.cpu.cfs_period_us", cpuCfsPeriod)
-			if err != nil {
-				return err
-			}
-		}
-
-		if cpuCfsQuota != "-1" {
-			err = lxcSetConfigItem(cc, "lxc.cgroup.cpu.cfs_quota_us", cpuCfsQuota)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Processes
-	if c.state.OS.CGroupPidsController {
-		processes := c.expandedConfig["limits.processes"]
-		if processes != "" {
-			valueInt, err := strconv.ParseInt(processes, 10, 64)
-			if err != nil {
-				return err
-			}
-
-			err = lxcSetConfigItem(cc, "lxc.cgroup.pids.max", fmt.Sprintf("%d", valueInt))
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Setup process limits
-	for k, v := range c.expandedConfig {
-		if strings.HasPrefix(k, "limits.kernel.") {
-			prlimitSuffix := strings.TrimPrefix(k, "limits.kernel.")
-			prlimitKey := fmt.Sprintf("lxc.prlimit.%s", prlimitSuffix)
-			err = lxcSetConfigItem(cc, prlimitKey, v)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Setup shmounts
-	if c.state.OS.LXCFeatures["mount_injection_file"] {
-		err = lxcSetConfigItem(cc, "lxc.mount.auto", fmt.Sprintf("shmounts:%s:/dev/.lxd-mounts", c.ShmountsPath()))
-	} else {
-		err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", c.ShmountsPath()))
-	}
-	if err != nil {
-		return err
-	}
-
-	// Apply raw.lxc
-	if lxcConfig, ok := c.expandedConfig["raw.lxc"]; ok {
-		f, err := ioutil.TempFile("", "lxd_config_")
-		if err != nil {
-			return err
-		}
-
-		err = shared.WriteAll(f, []byte(lxcConfig))
-		f.Close()
-		defer os.Remove(f.Name())
-		if err != nil {
-			return err
-		}
-
-		if err := cc.LoadConfigFile(f.Name()); err != nil {
-			return fmt.Errorf("Failed to load raw.lxc")
-		}
-	}
-
-	if c.c != nil {
-		c.c.Release()
-	}
-	c.c = cc
-	freeContainer = false
-
-	return nil
-}
-
-// runHooks executes the given hook functions in order, returning on the first error.
-func (c *containerLXC) runHooks(hooks []func() error) error {
-	// Run any post start hooks.
-	if len(hooks) > 0 {
-		for _, hook := range hooks {
-			err := hook()
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-// deviceLoad instantiates and validates a new device and returns it along with enriched config.
-func (c *containerLXC) deviceLoad(deviceName string, rawConfig config.Device) (device.Device, config.Device, error) {
-	var configCopy config.Device
-	var err error
-
-	// Create copy of config and load some fields from volatile if device is nic or infiniband.
-	if shared.StringInSlice(rawConfig["type"], []string{"nic", "infiniband"}) {
-		configCopy, err = c.fillNetworkDevice(deviceName, rawConfig)
-		if err != nil {
-			return nil, nil, err
-		}
-	} else {
-		// Otherwise copy the config so it cannot be modified by the device.
-		configCopy = rawConfig.Clone()
-	}
-
-	d, err := device.New(c, c.state, deviceName, configCopy, c.deviceVolatileGetFunc(deviceName), c.deviceVolatileSetFunc(deviceName))
-
-	// Return device and config copy even if error occurs as caller may still use device.
-	return d, configCopy, err
-}
-
-// deviceAdd loads a new device and calls its Add() function.
-func (c *containerLXC) deviceAdd(deviceName string, rawConfig config.Device) error {
-	d, _, err := c.deviceLoad(deviceName, rawConfig)
-	if err != nil {
-		return err
-	}
-
-	return d.Add()
-}
-
-// deviceStart loads a new device and calls its Start() function, then applies any runtime
-// config returned from Start() (mounts, cgroup rules, network interfaces) to the container.
-func (c *containerLXC) deviceStart(deviceName string, rawConfig config.Device, isRunning bool) (*device.RunConfig, error) {
-	d, configCopy, err := c.deviceLoad(deviceName, rawConfig)
-	if err != nil {
-		return nil, err
-	}
-
-	if canHotPlug, _ := d.CanHotPlug(); isRunning && !canHotPlug {
-		return nil, fmt.Errorf("Device cannot be started when container is running")
-	}
-
-	runConf, err := d.Start()
-	if err != nil {
-		return nil, err
-	}
-
-	// If runConf supplied, perform any container specific setup of device.
-	if runConf != nil {
-		// Shift device file ownership if needed before mounting into container.
-		// This needs to be done whether or not container is running.
-		if len(runConf.Mounts) > 0 {
-			err := c.deviceStaticShiftMounts(runConf.Mounts)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		// If container is running then live attach the device.
-		if isRunning {
-			// Attach mounts if requested.
-			if len(runConf.Mounts) > 0 {
-				err = c.deviceHandleMounts(runConf.Mounts)
-				if err != nil {
-					return nil, err
-				}
-			}
-
-			// Add cgroup rules if requested.
-			if len(runConf.CGroups) > 0 {
-				err = c.deviceAddCgroupRules(runConf.CGroups)
-				if err != nil {
-					return nil, err
-				}
-			}
-
-			// Attach network interface if requested.
-			if len(runConf.NetworkInterface) > 0 {
-				err = c.deviceAttachNIC(configCopy, runConf.NetworkInterface)
-				if err != nil {
-					return nil, err
-				}
-			}
-
-			// If running, run post start hooks now (if not running LXD will run them
-			// once the instance is started).
-			err = c.runHooks(runConf.PostHooks)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	return runConf, nil
-}
-
-// deviceStaticShiftMounts statically shifts device mount file ownership to the active idmap if needed.
-func (c *containerLXC) deviceStaticShiftMounts(mounts []device.MountEntryItem) error {
-	idmapSet, err := c.CurrentIdmap()
-	if err != nil {
-		return fmt.Errorf("Failed to get idmap for device: %s", err)
-	}
-
-	// If there is an idmap being applied and LXD not running in a user namespace then shift the
-	// device files before they are mounted.
-	if idmapSet != nil && !c.state.OS.RunningInUserNS {
-		for _, mount := range mounts {
-			// Skip UID/GID shifting if OwnerShift mode is not static, or the host-side
-			// DevPath is empty (meaning an unmount request that doesn't need shifting).
-			if mount.OwnerShift != device.MountOwnerShiftStatic || mount.DevPath == "" {
-				continue
-			}
-
-			err := idmapSet.ShiftFile(mount.DevPath)
-			if err != nil {
-				// uidshift failing is weird, but not a big problem. Log and proceed.
-				logger.Debugf("Failed to uidshift device %s: %s\n", mount.DevPath, err)
-			}
-		}
-	}
-
-	return nil
-}
-
-// deviceAddCgroupRules live adds cgroup rules to a container.
-func (c *containerLXC) deviceAddCgroupRules(cgroups []device.RunConfigItem) error {
-	for _, rule := range cgroups {
-		// Only apply devices cgroup rules if container is running privileged and host has devices cgroup controller.
-		if strings.HasPrefix(rule.Key, "devices.") && (!c.isCurrentlyPrivileged() || c.state.OS.RunningInUserNS || !c.state.OS.CGroupDevicesController) {
-			continue
-		}
-
-		// Add the new device cgroup rule.
-		err := c.CGroupSet(rule.Key, rule.Value)
-		if err != nil {
-			return fmt.Errorf("Failed to add cgroup rule for device")
-		}
-	}
-
-	return nil
-}
-
-// deviceAttachNIC live attaches a NIC device to a container.
-func (c *containerLXC) deviceAttachNIC(configCopy map[string]string, netIF []device.RunConfigItem) error {
-	devName := ""
-	for _, dev := range netIF {
-		if dev.Key == "link" {
-			devName = dev.Value
-			break
-		}
-	}
-
-	if devName == "" {
-		return fmt.Errorf("Device didn't provide a link property to use")
-	}
-
-	// Load the go-lxc struct.
-	err := c.initLXC(false)
-	if err != nil {
-		return err
-	}
-
-	// Add the interface to the container.
-	err = c.c.AttachInterface(devName, configCopy["name"])
-	if err != nil {
-		return fmt.Errorf("Failed to attach interface: %s to %s: %s", devName, configCopy["name"], err)
-	}
-
-	return nil
-}
-
-// deviceUpdate loads a new device and calls its Update() function.
-func (c *containerLXC) deviceUpdate(deviceName string, rawConfig config.Device, oldDevices config.Devices, isRunning bool) error {
-	d, _, err := c.deviceLoad(deviceName, rawConfig)
-	if err != nil {
-		return err
-	}
-
-	err = d.Update(oldDevices, isRunning)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// deviceStop loads a new device and calls its Stop() function.
-func (c *containerLXC) deviceStop(deviceName string, rawConfig config.Device, stopHookNetnsPath string) error {
-	d, configCopy, err := c.deviceLoad(deviceName, rawConfig)
-
-	// If deviceLoad fails with unsupported device type then return.
-	if err == device.ErrUnsupportedDevType {
-		return err
-	}
-
-	// If deviceLoad fails for any other reason then just log the error and proceed, as a newer
-	// version of LXD may have validation restrictions that older versions lacked, and we still
-	// need to allow previously valid devices to be stopped.
-	if err != nil {
-		// If there is no device returned, then we cannot proceed, so return as error.
-		if d == nil {
-			return fmt.Errorf("Device stop validation failed for '%s': %v", deviceName, err)
-		}
-
-		logger.Errorf("Device stop validation failed for '%s': %v", deviceName, err)
-	}
-
-	canHotPlug, _ := d.CanHotPlug()
-
-	// An empty netns path means we haven't been called from the LXC stop hook, so the container is running.
-	if stopHookNetnsPath == "" && !canHotPlug {
-		return fmt.Errorf("Device cannot be stopped when container is running")
-	}
-
-	runConf, err := d.Stop()
-	if err != nil {
-		return err
-	}
-
-	if runConf != nil {
-		// If network interface settings returned, then detach NIC from container.
-		if len(runConf.NetworkInterface) > 0 {
-			err = c.deviceDetachNIC(configCopy, runConf.NetworkInterface, stopHookNetnsPath)
-			if err != nil {
-				return err
-			}
-		}
-
-		// Add cgroup rules if requested and container is running.
-		if len(runConf.CGroups) > 0 && stopHookNetnsPath == "" {
-			err = c.deviceAddCgroupRules(runConf.CGroups)
-			if err != nil {
-				return err
-			}
-		}
-
-		// Detach mounts if requested and container is running.
-		if len(runConf.Mounts) > 0 && stopHookNetnsPath == "" {
-			err = c.deviceHandleMounts(runConf.Mounts)
-			if err != nil {
-				return err
-			}
-		}
-
-		// Run post stop hooks irrespective of run state of instance.
-		err = c.runHooks(runConf.PostHooks)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// deviceDetachNIC detaches a NIC device from a container.
-func (c *containerLXC) deviceDetachNIC(configCopy map[string]string, netIF []device.RunConfigItem, stopHookNetnsPath string) error {
-	// Get requested device name to detach interface back to on the host.
-	devName := ""
-	for _, dev := range netIF {
-		if dev.Key == "link" {
-			devName = dev.Value
-			break
-		}
-	}
-
-	if devName == "" {
-		return fmt.Errorf("Device didn't provide a link property to use")
-	}
-
-	// If container is running, perform live detach of interface back to host.
-	if stopHookNetnsPath == "" {
-		// For some reason, having network config confuses detach, so get our own go-lxc struct.
-		cname := project.Prefix(c.Project(), c.Name())
-		cc, err := lxc.NewContainer(cname, c.state.OS.LxcPath)
-		if err != nil {
-			return err
-		}
-		defer cc.Release()
-
-		// Get interfaces inside container.
-		ifaces, err := cc.Interfaces()
-		if err != nil {
-			return fmt.Errorf("Failed to list network interfaces: %v", err)
-		}
-
-		// If interface doesn't exist inside container, nothing to do.
-		if !shared.StringInSlice(configCopy["name"], ifaces) {
-			return nil
-		}
-
-		err = cc.DetachInterfaceRename(configCopy["name"], devName)
-		if err != nil {
-			return errors.Wrapf(err, "Failed to detach interface: %s to %s", configCopy["name"], devName)
-		}
-	} else {
-		// Currently liblxc does not move devices back to the host on stop that were added
-		// after the container was started. For this reason we utilise the lxc.hook.stop
-		// hook so that we can capture the netns path, enter the namespace and move the nics
-		// back to the host and rename them if liblxc hasn't already done it.
-		// We can only move back devices that have an expected host_name record and where
-		// that device doesn't already exist on the host as if a device exists on the host
-		// we can't know whether that is because liblxc has moved it back already or whether
-		// it is a conflicting device.
-		if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", devName)) {
-			err := c.detachInterfaceRename(stopHookNetnsPath, configCopy["name"], devName)
-			if err != nil {
-				return errors.Wrapf(err, "Failed to detach interface: %s to %s", configCopy["name"], devName)
-			}
-		}
-	}
-
-	return nil
-}
-
-// deviceHandleMounts live attaches or detaches mounts on a container.
-// If the mount DevPath is empty the mount action is treated as unmount.
-func (c *containerLXC) deviceHandleMounts(mounts []device.MountEntryItem) error {
-	for _, mount := range mounts {
-		if mount.DevPath != "" {
-			flags := 0
-
-			// Convert options into flags.
-			for _, opt := range mount.Opts {
-				if opt == "bind" {
-					flags |= unix.MS_BIND
-				} else if opt == "rbind" {
-					flags |= unix.MS_BIND | unix.MS_REC
-				}
-			}
-
-			shiftfs := false
-			if mount.OwnerShift == device.MountOwnerShiftDynamic {
-				shiftfs = true
-			}
-
-			// Mount it into the container.
-			err := c.insertMount(mount.DevPath, mount.TargetPath, mount.FSType, flags, shiftfs)
-			if err != nil {
-				return fmt.Errorf("Failed to add mount for device inside container: %s", err)
-			}
-		} else {
-			relativeTargetPath := strings.TrimPrefix(mount.TargetPath, "/")
-			if c.FileExists(relativeTargetPath) == nil {
-				err := c.removeMount(mount.TargetPath)
-				if err != nil {
-					return fmt.Errorf("Error unmounting the device path inside container: %s", err)
-				}
-
-				err = c.FileRemove(relativeTargetPath)
-				if err != nil {
-					// Only warn here and don't fail as removing a directory
-					// mount may fail if there were already files inside the
-					// directory before it was mounted over, preventing deletion.
-					logger.Warnf("Could not remove the device path inside container: %s", err)
-				}
-			}
-		}
-	}
-
-	return nil
-}
-
-// deviceRemove loads a new device and calls its Remove() function.
-func (c *containerLXC) deviceRemove(deviceName string, rawConfig config.Device) error {
-	d, _, err := c.deviceLoad(deviceName, rawConfig)
-
-	// If deviceLoad fails with unsupported device type then return.
-	if err == device.ErrUnsupportedDevType {
-		return err
-	}
-
-	// If deviceLoad fails for any other reason then just log the error and proceed, as a newer
-	// version of LXD may have validation restrictions that older versions lacked, and we still
-	// need to allow previously valid devices to be removed.
-	if err != nil {
-		logger.Errorf("Device remove validation failed for '%s': %v", deviceName, err)
-	}
-
-	return d.Remove()
-}
-
-// deviceVolatileGetFunc returns a function that retrieves a named device's volatile config and
-// removes its device prefix from the keys.
-func (c *containerLXC) deviceVolatileGetFunc(devName string) func() map[string]string {
-	return func() map[string]string {
-		volatile := make(map[string]string)
-		prefix := fmt.Sprintf("volatile.%s.", devName)
-		for k, v := range c.localConfig {
-			if strings.HasPrefix(k, prefix) {
-				volatile[strings.TrimPrefix(k, prefix)] = v
-			}
-		}
-		return volatile
-	}
-}
-
-// deviceVolatileSetFunc returns a function that can be called to save a named device's volatile
-// config using keys that do not have the device's name prefixed.
-func (c *containerLXC) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
-	return func(save map[string]string) error {
-		volatileSave := make(map[string]string)
-		for k, v := range save {
-			volatileSave[fmt.Sprintf("volatile.%s.%s", devName, k)] = v
-		}
-
-		return c.VolatileSet(volatileSave)
-	}
-}
-
-// deviceResetVolatile resets a device's volatile data when it's removed or updated in such a way
-// that it is removed then added immediately afterwards.
-func (c *containerLXC) deviceResetVolatile(devName string, oldConfig, newConfig config.Device) error {
-	volatileClear := make(map[string]string)
-	devicePrefix := fmt.Sprintf("volatile.%s.", devName)
-
-	// If the device type has changed, remove all old volatile keys.
-	// This will occur if the newConfig is empty (i.e. the device is actually being removed) or
-	// if the device type is being changed but keeping the same name.
-	if newConfig["type"] != oldConfig["type"] || newConfig["nictype"] != oldConfig["nictype"] {
-		for k := range c.localConfig {
-			if !strings.HasPrefix(k, devicePrefix) {
-				continue
-			}
-
-			volatileClear[k] = ""
-		}
-
-		return c.VolatileSet(volatileClear)
-	}
-
-	// If the device type remains the same, then just remove any volatile keys that have
-	// the same key name present in the new config (i.e. the new config is replacing the
-	// old volatile key).
-	for k := range c.localConfig {
-		if !strings.HasPrefix(k, devicePrefix) {
-			continue
-		}
-
-		devKey := strings.TrimPrefix(k, devicePrefix)
-		if _, found := newConfig[devKey]; found {
-			volatileClear[k] = ""
-		}
-	}
-
-	return c.VolatileSet(volatileClear)
-}
-
-// DeviceEventHandler actions the results of a RunConfig after an event has occurred on a device.
-func (c *containerLXC) DeviceEventHandler(runConf *device.RunConfig) error {
-	// Device events can only be processed when the container is running.
-	if !c.IsRunning() {
-		return nil
-	}
-
-	if runConf == nil {
-		return nil
-	}
-
-	// Shift device file ownership if needed before mounting devices into container.
-	if len(runConf.Mounts) > 0 {
-		err := c.deviceStaticShiftMounts(runConf.Mounts)
-		if err != nil {
-			return err
-		}
-
-		err = c.deviceHandleMounts(runConf.Mounts)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Add cgroup rules if requested.
-	if len(runConf.CGroups) > 0 {
-		err := c.deviceAddCgroupRules(runConf.CGroups)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Run any post hooks requested.
-	err := c.runHooks(runConf.PostHooks)
-	if err != nil {
-		return err
-	}
-
-	// Generate uevent inside container if requested.
-	if len(runConf.Uevents) > 0 {
-		for _, eventParts := range runConf.Uevents {
-			ueventArray := make([]string, 4)
-			ueventArray[0] = "forkuevent"
-			ueventArray[1] = "inject"
-			ueventArray[2] = fmt.Sprintf("%d", c.InitPID())
-			length := 0
-			for _, part := range eventParts {
-				length = length + len(part) + 1
-			}
-			ueventArray[3] = fmt.Sprintf("%d", length)
-			ueventArray = append(ueventArray, eventParts...)
-			_, err := shared.RunCommand(c.state.OS.ExecPath, ueventArray...)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-// Initialize storage interface for this container
-func (c *containerLXC) initStorage() error {
-	if c.storage != nil {
-		return nil
-	}
-
-	s, err := storagePoolVolumeContainerLoadInit(c.state, c.Project(), c.Name())
-	if err != nil {
-		return err
-	}
-
-	c.storage = s
-
-	return nil
-}
-
-// Config handling
-func (c *containerLXC) expandConfig(profiles []api.Profile) error {
-	if profiles == nil && len(c.profiles) > 0 {
-		var err error
-		profiles, err = c.state.Cluster.ProfilesGet(c.project, c.profiles)
-		if err != nil {
-			return err
-		}
-	}
-
-	c.expandedConfig = db.ProfilesExpandConfig(c.localConfig, profiles)
-
-	return nil
-}
-
-func (c *containerLXC) expandDevices(profiles []api.Profile) error {
-	if profiles == nil && len(c.profiles) > 0 {
-		var err error
-		profiles, err = c.state.Cluster.ProfilesGet(c.project, c.profiles)
-		if err != nil {
-			return err
-		}
-	}
-
-	c.expandedDevices = db.ProfilesExpandDevices(c.localDevices, profiles)
-
-	return nil
-}
-
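-// shiftBtrfsRootfs shifts or unshifts a btrfs rootfs, temporarily making read-only subvolumes
-// writable so that their ownership can be changed.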
-func shiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet, shift bool) error {
-	var err error
-	roSubvols := []string{}
-	subvols, _ := btrfsSubVolumesGet(path)
-	sort.Sort(sort.StringSlice(subvols))
-	for _, subvol := range subvols {
-		subvol = filepath.Join(path, subvol)
-
-		if !btrfsSubVolumeIsRo(subvol) {
-			continue
-		}
-
-		roSubvols = append(roSubvols, subvol)
-		btrfsSubVolumeMakeRw(subvol)
-	}
-
-	if shift {
-		err = diskIdmap.ShiftRootfs(path, nil)
-	} else {
-		err = diskIdmap.UnshiftRootfs(path, nil)
-	}
-
-	for _, subvol := range roSubvols {
-		btrfsSubVolumeMakeRo(subvol)
-	}
-
-	return err
-}
-
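-// ShiftBtrfsRootfs shifts a btrfs rootfs into the given idmap.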
-func ShiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet) error {
-	return shiftBtrfsRootfs(path, diskIdmap, true)
-}
-
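-// UnshiftBtrfsRootfs unshifts a btrfs rootfs out of the given idmap back to the host.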
-func UnshiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet) error {
-	return shiftBtrfsRootfs(path, diskIdmap, false)
-}
-
-// Start functions
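-// startCommon prepares idmaps, devices and the LXC configuration for startup, returning the
-// saved config path and any post-start hooks to run once the container has started.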
-func (c *containerLXC) startCommon() (string, []func() error, error) {
-	var ourStart bool
-	postStartHooks := []func() error{}
-
-	// Load the go-lxc struct
-	err := c.initLXC(true)
-	if err != nil {
-		return "", postStartHooks, errors.Wrap(err, "Load go-lxc struct")
-	}
-
-	// Check that we're not already running
-	if c.IsRunning() {
-		return "", postStartHooks, fmt.Errorf("The container is already running")
-	}
-
-	// Load any required kernel modules
-	kernelModules := c.expandedConfig["linux.kernel_modules"]
-	if kernelModules != "" {
-		for _, module := range strings.Split(kernelModules, ",") {
-			module = strings.TrimPrefix(module, " ")
-			err := util.LoadModule(module)
-			if err != nil {
-				return "", postStartHooks, fmt.Errorf("Failed to load kernel module '%s': %s", module, err)
-			}
-		}
-	}
-
-	/* Deal with idmap changes */
-	nextIdmap, err := c.NextIdmap()
-	if err != nil {
-		return "", postStartHooks, errors.Wrap(err, "Set ID map")
-	}
-
-	diskIdmap, err := c.DiskIdmap()
-	if err != nil {
-		return "", postStartHooks, errors.Wrap(err, "Set last ID map")
-	}
-
-	if !nextIdmap.Equals(diskIdmap) && !(diskIdmap == nil && c.state.OS.Shiftfs) {
-		if shared.IsTrue(c.expandedConfig["security.protection.shift"]) {
-			return "", postStartHooks, fmt.Errorf("Container is protected against filesystem shifting")
-		}
-
-		logger.Debugf("Container idmap changed, remapping")
-		c.updateProgress("Remapping container filesystem")
-
-		ourStart, err = c.StorageStart()
-		if err != nil {
-			return "", postStartHooks, errors.Wrap(err, "Storage start")
-		}
-
-		if diskIdmap != nil {
-			if c.Storage().GetStorageType() == storageTypeZfs {
-				err = diskIdmap.UnshiftRootfs(c.RootfsPath(), zfsIdmapSetSkipper)
-			} else if c.Storage().GetStorageType() == storageTypeBtrfs {
-				err = UnshiftBtrfsRootfs(c.RootfsPath(), diskIdmap)
-			} else {
-				err = diskIdmap.UnshiftRootfs(c.RootfsPath(), nil)
-			}
-			if err != nil {
-				if ourStart {
-					c.StorageStop()
-				}
-				return "", postStartHooks, err
-			}
-		}
-
-		if nextIdmap != nil && !c.state.OS.Shiftfs {
-			if c.Storage().GetStorageType() == storageTypeZfs {
-				err = nextIdmap.ShiftRootfs(c.RootfsPath(), zfsIdmapSetSkipper)
-			} else if c.Storage().GetStorageType() == storageTypeBtrfs {
-				err = ShiftBtrfsRootfs(c.RootfsPath(), nextIdmap)
-			} else {
-				err = nextIdmap.ShiftRootfs(c.RootfsPath(), nil)
-			}
-			if err != nil {
-				if ourStart {
-					c.StorageStop()
-				}
-				return "", postStartHooks, err
-			}
-		}
-
-		jsonDiskIdmap := "[]"
-		if nextIdmap != nil && !c.state.OS.Shiftfs {
-			idmapBytes, err := json.Marshal(nextIdmap.Idmap)
-			if err != nil {
-				return "", postStartHooks, err
-			}
-			jsonDiskIdmap = string(idmapBytes)
-		}
-
-		err = c.VolatileSet(map[string]string{"volatile.last_state.idmap": jsonDiskIdmap})
-		if err != nil {
-			return "", postStartHooks, errors.Wrapf(err, "Set volatile.last_state.idmap config key on container %q (id %d)", c.name, c.id)
-		}
-
-		c.updateProgress("")
-	}
-
-	var idmapBytes []byte
-	if nextIdmap == nil {
-		idmapBytes = []byte("[]")
-	} else {
-		idmapBytes, err = json.Marshal(nextIdmap.Idmap)
-		if err != nil {
-			return "", postStartHooks, err
-		}
-	}
-
-	if c.localConfig["volatile.idmap.current"] != string(idmapBytes) {
-		err = c.VolatileSet(map[string]string{"volatile.idmap.current": string(idmapBytes)})
-		if err != nil {
-			return "", postStartHooks, errors.Wrapf(err, "Set volatile.idmap.current config key on container %q (id %d)", c.name, c.id)
-		}
-	}
-
-	// Generate the Seccomp profile
-	if err := SeccompCreateProfile(c); err != nil {
-		return "", postStartHooks, err
-	}
-
-	// Cleanup any existing leftover devices
-	c.removeUnixDevices()
-	c.removeDiskDevices()
-
-	// Create any missing directories.
-	err = os.MkdirAll(c.LogPath(), 0700)
-	if err != nil {
-		return "", postStartHooks, err
-	}
-
-	err = os.MkdirAll(c.DevicesPath(), 0711)
-	if err != nil {
-		return "", postStartHooks, err
-	}
-
-	err = os.MkdirAll(c.ShmountsPath(), 0711)
-	if err != nil {
-		return "", postStartHooks, err
-	}
-
-	// Create the devices
-	nicID := -1
-
-	// Setup devices in sorted order, this ensures that device mounts are added in path order.
-	for _, dev := range c.expandedDevices.Sorted() {
-		// Start the device.
-		runConf, err := c.deviceStart(dev.Name, dev.Config, false)
-		if err != nil {
-			return "", postStartHooks, errors.Wrapf(err, "Failed to start device '%s'", dev.Name)
-		}
-
-		if runConf == nil {
-			continue
-		}
-
-		// Process rootfs setup.
-		if runConf.RootFS.Path != "" {
-			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
-				// Set the rootfs backend type if supported (must happen before any other lxc.rootfs)
-				err := lxcSetConfigItem(c.c, "lxc.rootfs.backend", "dir")
-				if err == nil {
-					value := c.c.ConfigItem("lxc.rootfs.backend")
-					if len(value) == 0 || value[0] != "dir" {
-						lxcSetConfigItem(c.c, "lxc.rootfs.backend", "")
-					}
-				}
-			}
-
-			if util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
-				rootfsPath := fmt.Sprintf("dir:%s", runConf.RootFS.Path)
-				err = lxcSetConfigItem(c.c, "lxc.rootfs.path", rootfsPath)
-			} else {
-				err = lxcSetConfigItem(c.c, "lxc.rootfs", runConf.RootFS.Path)
-			}
-
-			if err != nil {
-				return "", postStartHooks, errors.Wrapf(err, "Failed to setup device rootfs '%s'", dev.Name)
-			}
-
-			if len(runConf.RootFS.Opts) > 0 {
-				err = lxcSetConfigItem(c.c, "lxc.rootfs.options", strings.Join(runConf.RootFS.Opts, ","))
-				if err != nil {
-					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device rootfs '%s'", dev.Name)
-				}
-			}
-
-			if c.state.OS.Shiftfs && !c.IsPrivileged() && diskIdmap == nil {
-				// Host side mark mount.
-				err = lxcSetConfigItem(c.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", c.RootfsPath(), c.RootfsPath()))
-				if err != nil {
-					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
-				}
-
-				// Container side shift mount.
-				err = lxcSetConfigItem(c.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", c.RootfsPath(), c.RootfsPath()))
-				if err != nil {
-					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
-				}
-
-				// Host side umount of mark mount.
-				err = lxcSetConfigItem(c.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", c.RootfsPath()))
-				if err != nil {
-					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
-				}
-			}
-		}
-
-		// Pass any cgroups rules into LXC.
-		if len(runConf.CGroups) > 0 {
-			for _, rule := range runConf.CGroups {
-				err = lxcSetConfigItem(c.c, fmt.Sprintf("lxc.cgroup.%s", rule.Key), rule.Value)
-				if err != nil {
-					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device cgroup '%s'", dev.Name)
-				}
-			}
-		}
-
-		// Pass any mounts into LXC.
-		if len(runConf.Mounts) > 0 {
-			for _, mount := range runConf.Mounts {
-				if shared.StringInSlice("propagation", mount.Opts) && !util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
-					return "", postStartHooks, errors.Wrapf(fmt.Errorf("liblxc 3.0 is required for mount propagation configuration"), "Failed to setup device mount '%s'", dev.Name)
-				}
-
-				if mount.OwnerShift == device.MountOwnerShiftDynamic && !c.IsPrivileged() {
-					if !c.state.OS.Shiftfs {
-						return "", postStartHooks, errors.Wrapf(fmt.Errorf("shiftfs is required but isn't supported on system"), "Failed to setup device mount '%s'", dev.Name)
-					}
-
-					err = lxcSetConfigItem(c.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", mount.DevPath, mount.DevPath))
-					if err != nil {
-						return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
-					}
-
-					err = lxcSetConfigItem(c.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", mount.DevPath, mount.DevPath))
-					if err != nil {
-						return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
-					}
-
-					err = lxcSetConfigItem(c.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", mount.DevPath))
-					if err != nil {
-						return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
-					}
-				}
-
-				mntVal := fmt.Sprintf("%s %s %s %s %d %d", shared.EscapePathFstab(mount.DevPath), shared.EscapePathFstab(mount.TargetPath), mount.FSType, strings.Join(mount.Opts, ","), mount.Freq, mount.PassNo)
-				err = lxcSetConfigItem(c.c, "lxc.mount.entry", mntVal)
-				if err != nil {
-					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device mount '%s'", dev.Name)
-				}
-			}
-		}
-
-		// Pass any network setup config into LXC.
-		if len(runConf.NetworkInterface) > 0 {
-			// Increment nicID so that LXC network index is unique per device.
-			nicID++
-
-			networkKeyPrefix := "lxc.net"
-			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
-				networkKeyPrefix = "lxc.network"
-			}
-
-			for _, nicItem := range runConf.NetworkInterface {
-				err = lxcSetConfigItem(c.c, fmt.Sprintf("%s.%d.%s", networkKeyPrefix, nicID, nicItem.Key), nicItem.Value)
-				if err != nil {
-					return "", postStartHooks, errors.Wrapf(err, "Failed to setup device network interface '%s'", dev.Name)
-				}
-			}
-		}
-
-		// Add any post start hooks.
-		if len(runConf.PostHooks) > 0 {
-			postStartHooks = append(postStartHooks, runConf.PostHooks...)
-		}
-	}
-
-	// Rotate the log file
-	logfile := c.LogFilePath()
-	if shared.PathExists(logfile) {
-		os.Remove(logfile + ".old")
-		err := os.Rename(logfile, logfile+".old")
-		if err != nil {
-			return "", postStartHooks, err
-		}
-	}
-
-	// Storage is guaranteed to be mountable now (must be called after devices setup).
-	ourStart, err = c.StorageStart()
-	if err != nil {
-		return "", postStartHooks, err
-	}
-
-	// Generate the LXC config
-	configPath := filepath.Join(c.LogPath(), "lxc.conf")
-	err = c.c.SaveConfigFile(configPath)
-	if err != nil {
-		os.Remove(configPath)
-		return "", postStartHooks, err
-	}
-
-	// Set ownership to match container root
-	currentIdmapset, err := c.CurrentIdmap()
-	if err != nil {
-		if ourStart {
-			c.StorageStop()
-		}
-		return "", postStartHooks, err
-	}
-
-	uid := int64(0)
-	if currentIdmapset != nil {
-		uid, _ = currentIdmapset.ShiftFromNs(0, 0)
-	}
-
-	err = os.Chown(c.Path(), int(uid), 0)
-	if err != nil {
-		if ourStart {
-			c.StorageStop()
-		}
-		return "", postStartHooks, err
-	}
-
-	// We only need traversal by root in the container
-	err = os.Chmod(c.Path(), 0100)
-	if err != nil {
-		if ourStart {
-			c.StorageStop()
-		}
-		return "", postStartHooks, err
-	}
-
-	// Update the backup.yaml file
-	err = writeBackupFile(c)
-	if err != nil {
-		if ourStart {
-			c.StorageStop()
-		}
-		return "", postStartHooks, err
-	}
-
-	// If starting stateless, wipe state
-	if !c.IsStateful() && shared.PathExists(c.StatePath()) {
-		os.RemoveAll(c.StatePath())
-	}
-
-	// Unmount any previously mounted shiftfs
-	unix.Unmount(c.RootfsPath(), unix.MNT_DETACH)
-
-	return configPath, postStartHooks, nil
-}
-
-// detachInterfaceRename enters the container's network namespace and moves the named interface
-// in ifName back to the network namespace of the running process as the name specified in hostName.
-func (c *containerLXC) detachInterfaceRename(netns string, ifName string, hostName string) error {
-	lxdPID := os.Getpid()
-
-	// Run forknet detach
-	_, err := shared.RunCommand(
-		c.state.OS.ExecPath,
-		"forknet",
-		"detach",
-		netns,
-		fmt.Sprintf("%d", lxdPID),
-		ifName,
-		hostName,
-	)
-
-	// Process forknet detach response
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
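-// Start starts the container, restoring its saved CRIU state when stateful is true.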
-func (c *containerLXC) Start(stateful bool) error {
-	var ctxMap log.Ctx
-
-	// Setup a new operation
-	op, err := c.createOperation("start", false, false)
-	if err != nil {
-		return errors.Wrap(err, "Create container start operation")
-	}
-	defer op.Done(nil)
-
-	err = setupSharedMounts()
-	if err != nil {
-		return fmt.Errorf("Daemon failed to setup shared mounts base: %s.\nDoes security.nesting need to be turned on?", err)
-	}
-
-	// Run the shared start code
-	configPath, postStartHooks, err := c.startCommon()
-	if err != nil {
-		return errors.Wrap(err, "Common start logic")
-	}
-
-	// Ensure that the container storage volume is mounted.
-	_, err = c.StorageStart()
-	if err != nil {
-		return errors.Wrap(err, "Storage start")
-	}
-
-	ctxMap = log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"action":    op.action,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
-		"stateful":  stateful}
-
-	logger.Info("Starting container", ctxMap)
-
-	// If stateful, restore now
-	if stateful {
-		if !c.stateful {
-			return fmt.Errorf("Container has no existing state to restore")
-		}
-
-		criuMigrationArgs := CriuMigrationArgs{
-			cmd:          lxc.MIGRATE_RESTORE,
-			stateDir:     c.StatePath(),
-			function:     "snapshot",
-			stop:         false,
-			actionScript: false,
-			dumpDir:      "",
-			preDumpDir:   "",
-		}
-
-		err := c.Migrate(&criuMigrationArgs)
-		if err != nil && !c.IsRunning() {
-			return errors.Wrap(err, "Migrate")
-		}
-
-		os.RemoveAll(c.StatePath())
-		c.stateful = false
-
-		err = c.state.Cluster.ContainerSetStateful(c.id, false)
-		if err != nil {
-			logger.Error("Failed starting container", ctxMap)
-			return errors.Wrap(err, "Start container")
-		}
-
-		// Run any post start hooks.
-		err = c.runHooks(postStartHooks)
-		if err != nil {
-			// Attempt to stop container.
-			op.Done(err)
-			c.Stop(false)
-			return err
-		}
-
-		logger.Info("Started container", ctxMap)
-		return nil
-	} else if c.stateful {
-		/* A stateless start was requested, but the container has saved state; delete it */
-		err := os.RemoveAll(c.StatePath())
-		if err != nil {
-			return err
-		}
-
-		c.stateful = false
-		err = c.state.Cluster.ContainerSetStateful(c.id, false)
-		if err != nil {
-			return errors.Wrap(err, "Persist stateful flag")
-		}
-	}
-
-	name := project.Prefix(c.Project(), c.name)
-
-	// Start the LXC container
-	_, err = shared.RunCommand(
-		c.state.OS.ExecPath,
-		"forkstart",
-		name,
-		c.state.OS.LxcPath,
-		configPath)
-	if err != nil && !c.IsRunning() {
-		// Attempt to extract the LXC errors
-		lxcLog := ""
-		logPath := filepath.Join(c.LogPath(), "lxc.log")
-		if shared.PathExists(logPath) {
-			logContent, err := ioutil.ReadFile(logPath)
-			if err == nil {
-				for _, line := range strings.Split(string(logContent), "\n") {
-					fields := strings.Fields(line)
-					if len(fields) < 4 {
-						continue
-					}
-
-					// We only care about errors
-					if fields[2] != "ERROR" {
-						continue
-					}
-
-					// Prepend a line break before the first error line
-					if len(lxcLog) == 0 {
-						lxcLog += "\n"
-					}
-
-					lxcLog += fmt.Sprintf("  %s\n", strings.Join(fields[0:], " "))
-				}
-			}
-		}
-
-		logger.Error("Failed starting container", ctxMap)
-
-		// Return the actual error
-		return err
-	}
-
-	// Run any post start hooks.
-	err = c.runHooks(postStartHooks)
-	if err != nil {
-		// Attempt to stop container.
-		op.Done(err)
-		c.Stop(false)
-		return err
-	}
-
-	logger.Info("Started container", ctxMap)
-	eventSendLifecycle(c.project, "container-started",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
-
-	return nil
-}
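
The lxc.log scraping above is easy to exercise in isolation. A stdlib-only sketch of the same filtering (the helper name extractLXCErrors is hypothetical):

    package sketch

    import (
        "fmt"
        "io/ioutil"
        "strings"
    )

    // extractLXCErrors keeps only the ERROR lines from an lxc.log file,
    // indented for inclusion in an error message. Lines look like:
    //   <date> <time> ERROR <subsystem> - <file>:<func>:<line> - <message>
    func extractLXCErrors(logPath string) (string, error) {
        content, err := ioutil.ReadFile(logPath)
        if err != nil {
            return "", err
        }

        out := ""
        for _, line := range strings.Split(string(content), "\n") {
            fields := strings.Fields(line)
            if len(fields) < 4 || fields[2] != "ERROR" {
                continue
            }
            out += fmt.Sprintf("  %s\n", strings.Join(fields, " "))
        }
        return out, nil
    }
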
-
-func (c *containerLXC) OnStart() error {
-	// Make sure we can't call go-lxc functions by mistake
-	c.fromHook = true
-
-	// Start the storage for this container
-	ourStart, err := c.StorageStartSensitive()
-	if err != nil {
-		return err
-	}
-
-	// Load the container AppArmor profile
-	err = AALoadProfile(c)
-	if err != nil {
-		if ourStart {
-			c.StorageStop()
-		}
-		return err
-	}
-
-	// Template anything that needs templating
-	key := "volatile.apply_template"
-	if c.localConfig[key] != "" {
-		// Run any template that needs running
-		err = c.templateApplyNow(c.localConfig[key])
-		if err != nil {
-			AADestroy(c)
-			if ourStart {
-				c.StorageStop()
-			}
-			return err
-		}
-
-		// Remove the volatile key from the DB
-		err := c.state.Cluster.ContainerConfigRemove(c.id, key)
-		if err != nil {
-			AADestroy(c)
-			if ourStart {
-				c.StorageStop()
-			}
-			return err
-		}
-	}
-
-	err = c.templateApplyNow("start")
-	if err != nil {
-		AADestroy(c)
-		if ourStart {
-			c.StorageStop()
-		}
-		return err
-	}
-
-	// Trigger a rebalance
-	deviceTaskSchedulerTrigger("container", c.name, "started")
-
-	// Apply network priority
-	if c.expandedConfig["limits.network.priority"] != "" {
-		go func(c *containerLXC) {
-			c.fromHook = false
-			err := c.setNetworkPriority()
-			if err != nil {
-				logger.Error("Failed to apply network priority", log.Ctx{"container": c.name, "err": err})
-			}
-		}(c)
-	}
-
-	// Database updates
-	err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		// Record current state
-		err = tx.ContainerSetState(c.id, "RUNNING")
-		if err != nil {
-			return errors.Wrap(err, "Error updating container state")
-		}
-
-		// Update the container's last used timestamp
-		err = tx.ContainerLastUsedUpdate(c.id, time.Now().UTC())
-		if err != nil {
-			return errors.Wrap(err, "Error updating last used")
-		}
-
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Stop functions
-func (c *containerLXC) Stop(stateful bool) error {
-	var ctxMap log.Ctx
-
-	// Check that we're not already stopped
-	if !c.IsRunning() {
-		return fmt.Errorf("The container is already stopped")
-	}
-
-	// Setup a new operation
-	op, err := c.createOperation("stop", false, true)
-	if err != nil {
-		return err
-	}
-
-	ctxMap = log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"action":    op.action,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
-		"stateful":  stateful}
-
-	logger.Info("Stopping container", ctxMap)
-
-	// Handle stateful stop
-	if stateful {
-		// Cleanup any existing state
-		stateDir := c.StatePath()
-		os.RemoveAll(stateDir)
-
-		err := os.MkdirAll(stateDir, 0700)
-		if err != nil {
-			op.Done(err)
-			logger.Error("Failed stopping container", ctxMap)
-			return err
-		}
-
-		criuMigrationArgs := CriuMigrationArgs{
-			cmd:          lxc.MIGRATE_DUMP,
-			stateDir:     stateDir,
-			function:     "snapshot",
-			stop:         true,
-			actionScript: false,
-			dumpDir:      "",
-			preDumpDir:   "",
-		}
-
-		// Checkpoint
-		err = c.Migrate(&criuMigrationArgs)
-		if err != nil {
-			op.Done(err)
-			logger.Error("Failed stopping container", ctxMap)
-			return err
-		}
-
-		err = op.Wait()
-		if err != nil && c.IsRunning() {
-			logger.Error("Failed stopping container", ctxMap)
-			return err
-		}
-
-		c.stateful = true
-		err = c.state.Cluster.ContainerSetStateful(c.id, true)
-		if err != nil {
-			op.Done(err)
-			logger.Error("Failed stopping container", ctxMap)
-			return err
-		}
-
-		op.Done(nil)
-		logger.Info("Stopped container", ctxMap)
-		eventSendLifecycle(c.project, "container-stopped",
-			fmt.Sprintf("/1.0/containers/%s", c.name), nil)
-		return nil
-	} else if shared.PathExists(c.StatePath()) {
-		os.RemoveAll(c.StatePath())
-	}
-
-	// Load the go-lxc struct
-	if c.expandedConfig["raw.lxc"] != "" {
-		err = c.initLXC(true)
-		if err != nil {
-			op.Done(err)
-			logger.Error("Failed stopping container", ctxMap)
-			return err
-		}
-	} else {
-		err = c.initLXC(false)
-		if err != nil {
-			op.Done(err)
-			logger.Error("Failed stopping container", ctxMap)
-			return err
-		}
-	}
-
-	// Fork-bomb mitigation, prevent forking from this point on
-	if c.state.OS.CGroupPidsController {
-		// Attempt to disable forking new processes
-		c.CGroupSet("pids.max", "0")
-	} else if c.state.OS.CGroupFreezerController {
-		// Attempt to freeze the container
-		freezer := make(chan bool, 1)
-		go func() {
-			c.Freeze()
-			freezer <- true
-		}()
-
-		select {
-		case <-freezer:
-		case <-time.After(time.Second * 5):
-			c.Unfreeze()
-		}
-	}
-
-	if err := c.c.Stop(); err != nil {
-		op.Done(err)
-		logger.Error("Failed stopping container", ctxMap)
-		return err
-	}
-
-	err = op.Wait()
-	if err != nil && c.IsRunning() {
-		logger.Error("Failed stopping container", ctxMap)
-		return err
-	}
-
-	logger.Info("Stopped container", ctxMap)
-	eventSendLifecycle(c.project, "container-stopped",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
-
-	return nil
-}
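
The fork-bomb mitigation above writes pids.max=0 so no new processes can be forked while the stop proceeds, falling back to a bounded freeze attempt when the pids controller is unavailable. A sketch of the direct cgroup v1 write (the path layout is illustrative):

    package sketch

    import "io/ioutil"

    // denyForks writes 0 to the pids controller's max file for a cgroup,
    // preventing any further fork/clone in that cgroup (cgroup v1 layout,
    // e.g. /sys/fs/cgroup/pids/lxc/<name>).
    func denyForks(cgroupPath string) error {
        return ioutil.WriteFile(cgroupPath+"/pids.max", []byte("0"), 0644)
    }
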
-
-func (c *containerLXC) Shutdown(timeout time.Duration) error {
-	var ctxMap log.Ctx
-
-	// Check that we're not already stopped
-	if !c.IsRunning() {
-		return fmt.Errorf("The container is already stopped")
-	}
-
-	// Setup a new operation
-	op, err := c.createOperation("stop", true, true)
-	if err != nil {
-		return err
-	}
-
-	ctxMap = log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"action":    "shutdown",
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
-		"timeout":   timeout}
-
-	logger.Info("Shutting down container", ctxMap)
-
-	// Load the go-lxc struct
-	if c.expandedConfig["raw.lxc"] != "" {
-		err = c.initLXC(true)
-		if err != nil {
-			op.Done(err)
-			logger.Error("Failed stopping container", ctxMap)
-			return err
-		}
-	} else {
-		err = c.initLXC(false)
-		if err != nil {
-			op.Done(err)
-			logger.Error("Failed stopping container", ctxMap)
-			return err
-		}
-	}
-
-	if err := c.c.Shutdown(timeout); err != nil {
-		op.Done(err)
-		logger.Error("Failed shutting down container", ctxMap)
-		return err
-	}
-
-	err = op.Wait()
-	if err != nil && c.IsRunning() {
-		logger.Error("Failed shutting down container", ctxMap)
-		return err
-	}
-
-	logger.Info("Shut down container", ctxMap)
-	eventSendLifecycle(c.project, "container-shutdown",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
-
-	return nil
-}
-
-// OnStopNS is triggered by LXC's stop hook once a container is shut down but before the
-// container's namespaces have been closed. The netns path of the stopped container is provided.
-func (c *containerLXC) OnStopNS(target string, netns string) error {
-	// Validate target
-	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
-		logger.Error("Container sent invalid target to OnStopNS", log.Ctx{"container": c.Name(), "target": target})
-		return fmt.Errorf("Invalid stop target: %s", target)
-	}
-
-	// Clean up devices.
-	c.cleanupDevices(netns)
-
-	return nil
-}
-
-// OnStop is triggered by LXC's post-stop hook once a container is shut down and after the
-// container's namespaces have been closed.
-func (c *containerLXC) OnStop(target string) error {
-	// Validate target
-	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
-		logger.Error("Container sent invalid target to OnStop", log.Ctx{"container": c.Name(), "target": target})
-		return fmt.Errorf("Invalid stop target: %s", target)
-	}
-
-	// Get operation
-	op, _ := c.getOperation("")
-	if op != nil && op.action != "stop" {
-		return fmt.Errorf("Container is already running a %s operation", op.action)
-	}
-
-	// Make sure we can't call go-lxc functions by mistake
-	c.fromHook = true
-
-	// Remove directory ownership (to avoid issues if the uidmap is re-used)
-	err := os.Chown(c.Path(), 0, 0)
-	if err != nil {
-		if op != nil {
-			op.Done(err)
-		}
-
-		return err
-	}
-
-	err = os.Chmod(c.Path(), 0100)
-	if err != nil {
-		if op != nil {
-			op.Done(err)
-		}
-
-		return err
-	}
-
-	// Stop the storage for this container
-	_, err = c.StorageStop()
-	if err != nil {
-		if op != nil {
-			op.Done(err)
-		}
-
-		return err
-	}
-
-	// Log user actions
-	if op == nil {
-		ctxMap := log.Ctx{
-			"project":   c.project,
-			"name":      c.name,
-			"action":    target,
-			"created":   c.creationDate,
-			"ephemeral": c.ephemeral,
-			"used":      c.lastUsedDate,
-			"stateful":  false}
-
-		logger.Info(fmt.Sprintf("Container initiated %s", target), ctxMap)
-	}
-
-	// Record power state
-	err = c.state.Cluster.ContainerSetState(c.id, "STOPPED")
-	if err != nil {
-		logger.Error("Failed to set container state", log.Ctx{"container": c.Name(), "err": err})
-	}
-
-	go func(c *containerLXC, target string, op *lxcContainerOperation) {
-		c.fromHook = false
-		err = nil
-
-		// Unlock on return
-		if op != nil {
-			defer op.Done(err)
-		}
-
-		// Wait for other post-stop actions to be done
-		c.IsRunning()
-
-		// Unload the apparmor profile
-		err = AADestroy(c)
-		if err != nil {
-			logger.Error("Failed to destroy apparmor namespace", log.Ctx{"container": c.Name(), "err": err})
-		}
-
-		// Clean all the unix devices
-		err = c.removeUnixDevices()
-		if err != nil {
-			logger.Error("Unable to remove unix devices", log.Ctx{"container": c.Name(), "err": err})
-		}
-
-		// Clean all the disk devices
-		err = c.removeDiskDevices()
-		if err != nil {
-			logger.Error("Unable to remove disk devices", log.Ctx{"container": c.Name(), "err": err})
-		}
-
-		// Reboot the container
-		if target == "reboot" {
-			// Start the container again
-			err = c.Start(false)
-			return
-		}
-
-		// Trigger a rebalance
-		deviceTaskSchedulerTrigger("container", c.name, "stopped")
-
-		// Destroy ephemeral containers
-		if c.ephemeral {
-			err = c.Delete()
-		}
-	}(c, target, op)
-
-	return nil
-}
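
One subtlety in the goroutine above: `defer op.Done(err)` evaluates its argument at the point of the defer statement, so it binds the nil that err holds there, not the value later assignments give it; a closure is needed to read err late. A tiny sketch of the difference:

    package sketch

    import "fmt"

    func deferSketch() {
        var err error

        // Argument evaluated now: always prints <nil>.
        defer fmt.Println("value defer:", err)

        // Closure reads err when it runs: prints the final value ("boom").
        defer func() { fmt.Println("closure defer:", err) }()

        err = fmt.Errorf("boom")
    }
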
-
-// cleanupDevices performs any needed device cleanup steps when the container is stopped.
-func (c *containerLXC) cleanupDevices(netns string) {
-	for _, dev := range c.expandedDevices.Sorted() {
-		// Use the device interface if device supports it.
-		err := c.deviceStop(dev.Name, dev.Config, netns)
-		if err == device.ErrUnsupportedDevType {
-			continue
-		} else if err != nil {
-			logger.Errorf("Failed to stop device '%s': %v", dev.Name, err)
-		}
-	}
-}
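
The device.ErrUnsupportedDevType comparison is the sentinel-error pattern: devices not yet ported to the new device interface return a well-known error value that callers skip rather than treat as a failure. A minimal sketch (names hypothetical):

    package sketch

    import (
        "fmt"
        "log"
    )

    // ErrUnsupportedDevType signals that a device is handled by legacy code.
    var ErrUnsupportedDevType = fmt.Errorf("unsupported device type")

    func stopAll(names []string, stop func(name string) error) {
        for _, name := range names {
            err := stop(name)
            if err == ErrUnsupportedDevType {
                continue // Legacy device; stopped elsewhere.
            }
            if err != nil {
                // Log and carry on so one bad device doesn't block the rest.
                log.Printf("Failed to stop device '%s': %v", name, err)
            }
        }
    }
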
-
-// Freezer functions
-func (c *containerLXC) Freeze() error {
-	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate}
-
-	// Check that we're running
-	if !c.IsRunning() {
-		return fmt.Errorf("The container isn't running")
-	}
-
-	// Check if the CGroup is available
-	if !c.state.OS.CGroupFreezerController {
-		logger.Info("Unable to freeze container (lack of kernel support)", ctxMap)
-		return nil
-	}
-
-	// Check that we're not already frozen
-	if c.IsFrozen() {
-		return fmt.Errorf("The container is already frozen")
-	}
-
-	logger.Info("Freezing container", ctxMap)
-
-	// Load the go-lxc struct
-	err := c.initLXC(false)
-	if err != nil {
-		ctxMap["err"] = err
-		logger.Error("Failed freezing container", ctxMap)
-		return err
-	}
-
-	err = c.c.Freeze()
-	if err != nil {
-		ctxMap["err"] = err
-		logger.Error("Failed freezing container", ctxMap)
-		return err
-	}
-
-	logger.Info("Froze container", ctxMap)
-	eventSendLifecycle(c.project, "container-paused",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
-
-	return err
-}
-
-func (c *containerLXC) Unfreeze() error {
-	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate}
-
-	// Check that we're running
-	if !c.IsRunning() {
-		return fmt.Errorf("The container isn't running")
-	}
-
-	// Check if the CGroup is available
-	if !c.state.OS.CGroupFreezerController {
-		logger.Info("Unable to unfreeze container (lack of kernel support)", ctxMap)
-		return nil
-	}
-
-	// Check that we're frozen
-	if !c.IsFrozen() {
-		return fmt.Errorf("The container is already running")
-	}
-
-	logger.Info("Unfreezing container", ctxMap)
-
-	// Load the go-lxc struct
-	err := c.initLXC(false)
-	if err != nil {
-		logger.Error("Failed unfreezing container", ctxMap)
-		return err
-	}
-
-	err = c.c.Unfreeze()
-	if err != nil {
-		logger.Error("Failed unfreezing container", ctxMap)
-		return err
-	}
-
-	logger.Info("Unfroze container", ctxMap)
-	eventSendLifecycle(c.project, "container-resumed",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
-
-	return err
-}
-
-var LxcMonitorStateError = fmt.Errorf("Monitor is hung")
-
-// Get the lxc container state, with a 5 second timeout.
-// If we don't get a reply, assume the lxc monitor is hung.
-func (c *containerLXC) getLxcState() (lxc.State, error) {
-	if c.IsSnapshot() {
-		return lxc.StateMap["STOPPED"], nil
-	}
-
-	// Load the go-lxc struct
-	err := c.initLXC(false)
-	if err != nil {
-		return lxc.StateMap["STOPPED"], err
-	}
-
-	monitor := make(chan lxc.State, 1)
-
-	go func(c *lxc.Container) {
-		monitor <- c.State()
-	}(c.c)
-
-	select {
-	case state := <-monitor:
-		return state, nil
-	case <-time.After(5 * time.Second):
-		return lxc.StateMap["FROZEN"], LxcMonitorStateError
-	}
-}
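
The channel/select construction above is the standard timeout guard around a blocking call. A generic stdlib sketch of the same pattern:

    package sketch

    import (
        "fmt"
        "time"
    )

    // callWithTimeout runs f in a goroutine and gives up after d. The result
    // channel is buffered so the goroutine can finish (and be collected) even
    // after the caller stops listening; if f never returns at all, the
    // goroutine leaks, which is the accepted cost of guarding a hung monitor.
    func callWithTimeout(f func() string, d time.Duration) (string, error) {
        result := make(chan string, 1)

        go func() {
            result <- f()
        }()

        select {
        case s := <-result:
            return s, nil
        case <-time.After(d):
            return "", fmt.Errorf("call timed out after %s", d)
        }
    }
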
-
-func (c *containerLXC) Render() (interface{}, interface{}, error) {
-	// Ignore err as the arch string on error is correct (unknown)
-	architectureName, _ := osarch.ArchitectureName(c.architecture)
-
-	if c.IsSnapshot() {
-		// Prepare the ETag
-		etag := []interface{}{c.expiryDate}
-
-		ct := api.InstanceSnapshot{
-			CreatedAt:       c.creationDate,
-			ExpandedConfig:  c.expandedConfig,
-			ExpandedDevices: c.expandedDevices.CloneNative(),
-			LastUsedAt:      c.lastUsedDate,
-			Name:            strings.SplitN(c.name, "/", 2)[1],
-			Stateful:        c.stateful,
-		}
-		ct.Architecture = architectureName
-		ct.Config = c.localConfig
-		ct.Devices = c.localDevices.CloneNative()
-		ct.Ephemeral = c.ephemeral
-		ct.Profiles = c.profiles
-		ct.ExpiresAt = c.expiryDate
-
-		return &ct, etag, nil
-	}
-
-	// Prepare the ETag
-	etag := []interface{}{c.architecture, c.localConfig, c.localDevices, c.ephemeral, c.profiles}
-
-	// FIXME: Render shouldn't directly access the go-lxc struct
-	cState, err := c.getLxcState()
-	if err != nil {
-		return nil, nil, errors.Wrap(err, "Get container state")
-	}
-	statusCode := lxcStatusCode(cState)
-
-	ct := api.Instance{
-		ExpandedConfig:  c.expandedConfig,
-		ExpandedDevices: c.expandedDevices.CloneNative(),
-		Name:            c.name,
-		Status:          statusCode.String(),
-		StatusCode:      statusCode,
-		Location:        c.node,
-		Type:            c.Type().String(),
-	}
-
-	ct.Description = c.description
-	ct.Architecture = architectureName
-	ct.Config = c.localConfig
-	ct.CreatedAt = c.creationDate
-	ct.Devices = c.localDevices.CloneNative()
-	ct.Ephemeral = c.ephemeral
-	ct.LastUsedAt = c.lastUsedDate
-	ct.Profiles = c.profiles
-	ct.Stateful = c.stateful
-
-	return &ct, etag, nil
-}
-
-func (c *containerLXC) RenderFull() (*api.InstanceFull, interface{}, error) {
-	if c.IsSnapshot() {
-		return nil, nil, fmt.Errorf("RenderFull only works with containers")
-	}
-
-	// Get the Container struct
-	base, etag, err := c.Render()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// Convert to ContainerFull
-	ct := api.InstanceFull{Instance: *base.(*api.Instance)}
-
-	// Add the ContainerState
-	ct.State, err = c.RenderState()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// Add the ContainerSnapshots
-	snaps, err := c.Snapshots()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	for _, snap := range snaps {
-		render, _, err := snap.Render()
-		if err != nil {
-			return nil, nil, err
-		}
-
-		if ct.Snapshots == nil {
-			ct.Snapshots = []api.InstanceSnapshot{}
-		}
-
-		ct.Snapshots = append(ct.Snapshots, *render.(*api.InstanceSnapshot))
-	}
-
-	// Add the ContainerBackups
-	backups, err := c.Backups()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	for _, backup := range backups {
-		render := backup.Render()
-
-		if ct.Backups == nil {
-			ct.Backups = []api.InstanceBackup{}
-		}
-
-		ct.Backups = append(ct.Backups, *render)
-	}
-
-	return &ct, etag, nil
-}
-
-func (c *containerLXC) RenderState() (*api.InstanceState, error) {
-	cState, err := c.getLxcState()
-	if err != nil {
-		return nil, err
-	}
-	statusCode := lxcStatusCode(cState)
-	status := api.InstanceState{
-		Status:     statusCode.String(),
-		StatusCode: statusCode,
-	}
-
-	if c.IsRunning() {
-		pid := c.InitPID()
-		status.CPU = c.cpuState()
-		status.Disk = c.diskState()
-		status.Memory = c.memoryState()
-		status.Network = c.networkState()
-		status.Pid = int64(pid)
-		status.Processes = c.processesState()
-	}
-
-	return &status, nil
-}
-
-func (c *containerLXC) Snapshots() ([]Instance, error) {
-	var snaps []db.Instance
-
-	if c.IsSnapshot() {
-		return []Instance{}, nil
-	}
-
-	// Get all the snapshots
-	err := c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		var err error
-		snaps, err = tx.ContainerGetSnapshotsFull(c.Project(), c.name)
-		if err != nil {
-			return err
-		}
-
-		return nil
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	// Build the snapshot list
-	containers, err := instanceLoadAllInternal(snaps, c.state)
-	if err != nil {
-		return nil, err
-	}
-
-	instances := make([]Instance, len(containers))
-	for k, v := range containers {
-		instances[k] = Instance(v)
-	}
-
-	return instances, nil
-}
-
-func (c *containerLXC) Backups() ([]backup, error) {
-	// Get all the backups
-	backupNames, err := c.state.Cluster.ContainerGetBackups(c.project, c.name)
-	if err != nil {
-		return nil, err
-	}
-
-	// Build the backup list
-	backups := []backup{}
-	for _, backupName := range backupNames {
-		backup, err := backupLoadByName(c.state, c.project, backupName)
-		if err != nil {
-			return nil, err
-		}
-
-		backups = append(backups, *backup)
-	}
-
-	return backups, nil
-}
-
-func (c *containerLXC) Restore(sourceContainer Instance, stateful bool) error {
-	var ctxMap log.Ctx
-
-	// Initialize storage interface for the container.
-	err := c.initStorage()
-	if err != nil {
-		return err
-	}
-
-	ourStart, err := c.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer c.StorageStop()
-	}
-
-	/* let's also check for CRIU if necessary, before doing a bunch of
-	 * filesystem manipulations
-	 */
-	if shared.PathExists(c.StatePath()) {
-		_, err := exec.LookPath("criu")
-		if err != nil {
-			return fmt.Errorf("Failed to restore container state. CRIU isn't installed")
-		}
-	}
-
-	// Stop the container
-	wasRunning := false
-	if c.IsRunning() {
-		wasRunning = true
-
-		ephemeral := c.IsEphemeral()
-		if ephemeral {
-			// Unset ephemeral flag
-			args := db.ContainerArgs{
-				Architecture: c.Architecture(),
-				Config:       c.LocalConfig(),
-				Description:  c.Description(),
-				Devices:      c.LocalDevices(),
-				Ephemeral:    false,
-				Profiles:     c.Profiles(),
-				Project:      c.Project(),
-				Type:         c.Type(),
-				Snapshot:     c.IsSnapshot(),
-			}
-
-			err := c.Update(args, false)
-			if err != nil {
-				return err
-			}
-
-			// On function return, set the flag back on
-			defer func() {
-				args.Ephemeral = ephemeral
-				c.Update(args, true)
-			}()
-		}
-
-		// This will unmount the container storage.
-		err := c.Stop(false)
-		if err != nil {
-			return err
-		}
-
-		// Ensure that storage is mounted for state path checks.
-		ourStart, err := c.StorageStart()
-		if err != nil {
-			return err
-		}
-		if ourStart {
-			defer c.StorageStop()
-		}
-	}
-
-	ctxMap = log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
-		"source":    sourceContainer.Name()}
-
-	logger.Info("Restoring container", ctxMap)
-
-	// Restore the rootfs
-	err = c.storage.ContainerRestore(c, sourceContainer)
-	if err != nil {
-		logger.Error("Failed restoring container filesystem", ctxMap)
-		return err
-	}
-
-	// Restore the configuration
-	args := db.ContainerArgs{
-		Architecture: sourceContainer.Architecture(),
-		Config:       sourceContainer.LocalConfig(),
-		Description:  sourceContainer.Description(),
-		Devices:      sourceContainer.LocalDevices(),
-		Ephemeral:    sourceContainer.IsEphemeral(),
-		Profiles:     sourceContainer.Profiles(),
-		Project:      sourceContainer.Project(),
-		Type:         sourceContainer.Type(),
-		Snapshot:     sourceContainer.IsSnapshot(),
-	}
-
-	err = c.Update(args, false)
-	if err != nil {
-		logger.Error("Failed restoring container configuration", ctxMap)
-		return err
-	}
-
-	// The old backup file may be out of date (e.g. it doesn't have all the
-	// current snapshots of the container listed); let's write a new one to
-	// be safe.
-	err = writeBackupFile(c)
-	if err != nil {
-		return err
-	}
-
-	// If the container wasn't running but was stateful, should we restore
-	// it as running?
-	if stateful {
-		if !shared.PathExists(c.StatePath()) {
-			return fmt.Errorf("Stateful snapshot restore requested by snapshot is stateless")
-		}
-
-		logger.Debug("Performing stateful restore", ctxMap)
-		c.stateful = true
-
-		criuMigrationArgs := CriuMigrationArgs{
-			cmd:          lxc.MIGRATE_RESTORE,
-			stateDir:     c.StatePath(),
-			function:     "snapshot",
-			stop:         false,
-			actionScript: false,
-			dumpDir:      "",
-			preDumpDir:   "",
-		}
-
-		// Restore the container state
-		err := c.Migrate(&criuMigrationArgs)
-		if err != nil {
-			return err
-		}
-
-		// Remove the state from the parent container; we only keep
-		// this in snapshots.
-		err2 := os.RemoveAll(c.StatePath())
-		if err2 != nil {
-			logger.Error("Failed to delete snapshot state", log.Ctx{"path": c.StatePath(), "err": err2})
-		}
-
-		if err != nil {
-			logger.Info("Failed restoring container", ctxMap)
-			return err
-		}
-
-		logger.Debug("Performed stateful restore", ctxMap)
-		logger.Info("Restored container", ctxMap)
-		return nil
-	}
-
-	eventSendLifecycle(c.project, "container-snapshot-restored",
-		fmt.Sprintf("/1.0/containers/%s", c.name), map[string]interface{}{
-			"snapshot_name": c.name,
-		})
-
-	// Restart the container
-	if wasRunning {
-		logger.Info("Restored container", ctxMap)
-		return c.Start(false)
-	}
-
-	logger.Info("Restored container", ctxMap)
-
-	return nil
-}
-
-func (c *containerLXC) cleanup() {
-	// Unmount any leftovers
-	c.removeUnixDevices()
-	c.removeDiskDevices()
-
-	// Remove the security profiles
-	AADeleteProfile(c)
-	SeccompDeleteProfile(c)
-
-	// Remove the devices path
-	os.Remove(c.DevicesPath())
-
-	// Remove the shmounts path
-	os.RemoveAll(c.ShmountsPath())
-}
-
-func (c *containerLXC) Delete() error {
-	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate}
-
-	logger.Info("Deleting container", ctxMap)
-
-	if shared.IsTrue(c.expandedConfig["security.protection.delete"]) && !c.IsSnapshot() {
-		err := fmt.Errorf("Container is protected")
-		logger.Warn("Failed to delete container", log.Ctx{"name": c.Name(), "err": err})
-		return err
-	}
-
-	// Check if we're dealing with "lxd import"
-	isImport := false
-	if c.storage != nil {
-		_, poolName, _ := c.storage.GetContainerPoolInfo()
-
-		if c.IsSnapshot() {
-			cName, _, _ := shared.ContainerGetParentAndSnapshotName(c.name)
-			if shared.PathExists(shared.VarPath("storage-pools", poolName, "containers", cName, ".importing")) {
-				isImport = true
-			}
-		} else {
-			if shared.PathExists(shared.VarPath("storage-pools", poolName, "containers", c.name, ".importing")) {
-				isImport = true
-			}
-		}
-	}
-
-	// Attempt to initialize storage interface for the container.
-	err := c.initStorage()
-	if err != nil {
-		logger.Warnf("Failed to init storage: %v", err)
-	}
-
-	if c.IsSnapshot() {
-		// Remove the snapshot
-		if c.storage != nil && !isImport {
-			err := c.storage.ContainerSnapshotDelete(c)
-			if err != nil {
-				logger.Warn("Failed to delete snapshot", log.Ctx{"name": c.Name(), "err": err})
-				return err
-			}
-		}
-	} else {
-		// Remove all snapshots
-		err := containerDeleteSnapshots(c.state, c.Project(), c.Name())
-		if err != nil {
-			logger.Warn("Failed to delete snapshots", log.Ctx{"name": c.Name(), "err": err})
-			return err
-		}
-
-		// Remove all backups
-		backups, err := c.Backups()
-		if err != nil {
-			return err
-		}
-
-		for _, backup := range backups {
-			err = backup.Delete()
-			if err != nil {
-				return err
-			}
-		}
-
-		// Clean things up
-		c.cleanup()
-
-		// Delete the container from disk
-		if c.storage != nil && !isImport {
-			_, poolName, _ := c.storage.GetContainerPoolInfo()
-			containerMountPoint := driver.GetContainerMountPoint(c.Project(), poolName, c.Name())
-			if shared.PathExists(c.Path()) ||
-				shared.PathExists(containerMountPoint) {
-				err := c.storage.ContainerDelete(c)
-				if err != nil {
-					logger.Error("Failed deleting container storage", log.Ctx{"name": c.Name(), "err": err})
-					return err
-				}
-			}
-		}
-
-		// Delete the MAAS entry
-		err = c.maasDelete()
-		if err != nil {
-			logger.Error("Failed deleting container MAAS record", log.Ctx{"name": c.Name(), "err": err})
-			return err
-		}
-
-		// Remove devices from container.
-		for k, m := range c.expandedDevices {
-			err = c.deviceRemove(k, m)
-			if err != nil && err != device.ErrUnsupportedDevType {
-				return errors.Wrapf(err, "Failed to remove device '%s'", k)
-			}
-		}
-	}
-
-	// Remove the database record
-	if err := c.state.Cluster.ContainerRemove(c.project, c.Name()); err != nil {
-		logger.Error("Failed deleting container entry", log.Ctx{"name": c.Name(), "err": err})
-		return err
-	}
-
-	// Remove the database entry for the pool device
-	if c.storage != nil {
-		// Get the name of the storage pool the container is attached to. This
-		// reverse-engineering works because container names are globally
-		// unique.
-		poolID, _, _ := c.storage.GetContainerPoolInfo()
-
-		// Remove volume from storage pool.
-		err := c.state.Cluster.StoragePoolVolumeDelete(c.Project(), c.Name(), storagePoolVolumeTypeContainer, poolID)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Info("Deleted container", ctxMap)
-
-	if c.IsSnapshot() {
-		eventSendLifecycle(c.project, "container-snapshot-deleted",
-			fmt.Sprintf("/1.0/containers/%s", c.name), map[string]interface{}{
-				"snapshot_name": c.name,
-			})
-	} else {
-		eventSendLifecycle(c.project, "container-deleted",
-			fmt.Sprintf("/1.0/containers/%s", c.name), nil)
-	}
-
-	return nil
-}
-
-func (c *containerLXC) Rename(newName string) error {
-	oldName := c.Name()
-	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
-		"newname":   newName}
-
-	logger.Info("Renaming container", ctxMap)
-
-	// Initialize storage interface for the container.
-	err := c.initStorage()
-	if err != nil {
-		return err
-	}
-
-	// Sanity checks
-	if !c.IsSnapshot() && !shared.ValidHostname(newName) {
-		return fmt.Errorf("Invalid container name")
-	}
-
-	if c.IsRunning() {
-		return fmt.Errorf("Renaming of running container not allowed")
-	}
-
-	// Clean things up
-	c.cleanup()
-
-	// Rename the MAAS entry
-	if !c.IsSnapshot() {
-		err = c.maasRename(newName)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Rename the logging path
-	os.RemoveAll(shared.LogPath(newName))
-	if shared.PathExists(c.LogPath()) {
-		err := os.Rename(c.LogPath(), shared.LogPath(newName))
-		if err != nil {
-			logger.Error("Failed renaming container", ctxMap)
-			return err
-		}
-	}
-
-	// Rename the storage entry
-	if c.IsSnapshot() {
-		err := c.storage.ContainerSnapshotRename(c, newName)
-		if err != nil {
-			logger.Error("Failed renaming container", ctxMap)
-			return err
-		}
-	} else {
-		err := c.storage.ContainerRename(c, newName)
-		if err != nil {
-			logger.Error("Failed renaming container", ctxMap)
-			return err
-		}
-	}
-
-	// Rename the backups
-	backups, err := c.Backups()
-	if err != nil {
-		return err
-	}
-
-	for _, backup := range backups {
-		backupName := strings.Split(backup.name, "/")[1]
-		newName := fmt.Sprintf("%s/%s", newName, backupName)
-
-		err = backup.Rename(newName)
-		if err != nil {
-			return err
-		}
-	}
-
-	poolID, _, _ := c.storage.GetContainerPoolInfo()
-
-	if !c.IsSnapshot() {
-		// Rename all the snapshots
-		results, err := c.state.Cluster.ContainerGetSnapshots(c.project, oldName)
-		if err != nil {
-			logger.Error("Failed to get container snapshots", ctxMap)
-			return err
-		}
-
-		for _, sname := range results {
-			// Rename the snapshot
-			oldSnapName := strings.SplitN(sname, shared.SnapshotDelimiter, 2)[1]
-			baseSnapName := filepath.Base(sname)
-			newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName
-			err := c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-				return tx.InstanceSnapshotRename(c.project, oldName, oldSnapName, baseSnapName)
-			})
-			if err != nil {
-				logger.Error("Failed renaming snapshot", ctxMap)
-				return err
-			}
-
-			// Rename storage volume for the snapshot.
-			err = c.state.Cluster.StoragePoolVolumeRename(c.project, sname, newSnapshotName, storagePoolVolumeTypeContainer, poolID)
-			if err != nil {
-				logger.Error("Failed renaming storage volume", ctxMap)
-				return err
-			}
-		}
-	}
-
-	// Rename the database entry
-	err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		if c.IsSnapshot() {
-			oldParts := strings.SplitN(oldName, shared.SnapshotDelimiter, 2)
-			newParts := strings.SplitN(newName, shared.SnapshotDelimiter, 2)
-			return tx.InstanceSnapshotRename(c.project, oldParts[0], oldParts[1], newParts[1])
-		} else {
-			return tx.InstanceRename(c.project, oldName, newName)
-		}
-	})
-	if err != nil {
-		logger.Error("Failed renaming container", ctxMap)
-		return err
-	}
-
-	// Rename storage volume for the container.
-	err = c.state.Cluster.StoragePoolVolumeRename(c.project, oldName, newName, storagePoolVolumeTypeContainer, poolID)
-	if err != nil {
-		logger.Error("Failed renaming storage volume", ctxMap)
-		return err
-	}
-
-	// Set the new name in the struct
-	c.name = newName
-
-	// Update the storage volume name in the storage interface.
-	sNew := c.storage.GetStoragePoolVolumeWritable()
-	c.storage.SetStoragePoolVolumeWritable(&sNew)
-
-	// Invalidate the go-lxc cache
-	if c.c != nil {
-		c.c.Release()
-		c.c = nil
-	}
-
-	c.cConfig = false
-
-	// Update lease files
-	networkUpdateStatic(c.state, "")
-
-	logger.Info("Renamed container", ctxMap)
-
-	if c.IsSnapshot() {
-		eventSendLifecycle(c.project, "container-snapshot-renamed",
-			fmt.Sprintf("/1.0/containers/%s", oldName), map[string]interface{}{
-				"new_name":      newName,
-				"snapshot_name": oldName,
-			})
-	} else {
-		eventSendLifecycle(c.project, "container-renamed",
-			fmt.Sprintf("/1.0/containers/%s", oldName), map[string]interface{}{
-				"new_name": newName,
-			})
-	}
-
-	return nil
-}
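
Snapshot names are stored as "<parent><delimiter><snapshot>", which is why the rename code keeps splitting with SplitN. A stdlib sketch of the parsing, assuming "/" as shared.SnapshotDelimiter:

    package sketch

    import "strings"

    // splitSnapshotName splits "c1/snap0" into ("c1", "snap0"); a name with
    // no delimiter is a plain container and yields an empty snapshot part.
    func splitSnapshotName(name string) (parent string, snap string) {
        parts := strings.SplitN(name, "/", 2)
        if len(parts) < 2 {
            return parts[0], ""
        }
        return parts[0], parts[1]
    }
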
-
-func (c *containerLXC) CGroupGet(key string) (string, error) {
-	// Load the go-lxc struct
-	err := c.initLXC(false)
-	if err != nil {
-		return "", err
-	}
-
-	// Make sure the container is running
-	if !c.IsRunning() {
-		return "", fmt.Errorf("Can't get cgroups on a stopped container")
-	}
-
-	value := c.c.CgroupItem(key)
-	return strings.Join(value, "\n"), nil
-}
-
-func (c *containerLXC) CGroupSet(key string, value string) error {
-	// Load the go-lxc struct
-	err := c.initLXC(false)
-	if err != nil {
-		return err
-	}
-
-	// Make sure the container is running
-	if !c.IsRunning() {
-		return fmt.Errorf("Can't set cgroups on a stopped container")
-	}
-
-	err = c.c.SetCgroupItem(key, value)
-	if err != nil {
-		return fmt.Errorf("Failed to set cgroup %s=\"%s\": %s", key, value, err)
-	}
-
-	return nil
-}
-
-func (c *containerLXC) VolatileSet(changes map[string]string) error {
-	// Sanity check
-	for key := range changes {
-		if !strings.HasPrefix(key, "volatile.") {
-			return fmt.Errorf("Only volatile keys can be modified with VolatileSet")
-		}
-	}
-
-	// Update the database
-	var err error
-	if c.IsSnapshot() {
-		err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-			return tx.InstanceSnapshotConfigUpdate(c.id, changes)
-		})
-	} else {
-		err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-			return tx.ContainerConfigUpdate(c.id, changes)
-		})
-	}
-	if err != nil {
-		return errors.Wrap(err, "Failed to update volatile config")
-	}
-
-	// Apply the change locally
-	for key, value := range changes {
-		if value == "" {
-			delete(c.expandedConfig, key)
-			delete(c.localConfig, key)
-			continue
-		}
-
-		c.expandedConfig[key] = value
-		c.localConfig[key] = value
-	}
-
-	return nil
-}
-
-type backupFile struct {
-	Container *api.Instance           `yaml:"container"`
-	Snapshots []*api.InstanceSnapshot `yaml:"snapshots"`
-	Pool      *api.StoragePool        `yaml:"pool"`
-	Volume    *api.StorageVolume      `yaml:"volume"`
-}
-
-func writeBackupFile(c Instance) error {
-	// We only write backup files out for actual containers
-	if c.IsSnapshot() {
-		return nil
-	}
-
-	// Immediately return if the container directory doesn't exist yet
-	if !shared.PathExists(c.Path()) {
-		return os.ErrNotExist
-	}
-
-	// Generate the YAML
-	ci, _, err := c.Render()
-	if err != nil {
-		return errors.Wrap(err, "Failed to render container metadata")
-	}
-
-	snapshots, err := c.Snapshots()
-	if err != nil {
-		return errors.Wrap(err, "Failed to get snapshots")
-	}
-
-	var sis []*api.InstanceSnapshot
-
-	for _, s := range snapshots {
-		si, _, err := s.Render()
-		if err != nil {
-			return err
-		}
-
-		sis = append(sis, si.(*api.InstanceSnapshot))
-	}
-
-	poolName, err := c.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	s := c.DaemonState()
-	poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
-	if err != nil {
-		return err
-	}
-
-	_, volume, err := s.Cluster.StoragePoolNodeVolumeGetTypeByProject(c.Project(), c.Name(), storagePoolVolumeTypeContainer, poolID)
-	if err != nil {
-		return err
-	}
-
-	data, err := yaml.Marshal(&backupFile{
-		Container: ci.(*api.Instance),
-		Snapshots: sis,
-		Pool:      pool,
-		Volume:    volume,
-	})
-	if err != nil {
-		return err
-	}
-
-	// Ensure the container is currently mounted
-	if !shared.PathExists(c.RootfsPath()) {
-		logger.Debug("Unable to update backup.yaml at this time", log.Ctx{"name": c.Name(), "project": c.Project()})
-		return nil
-	}
-
-	// Write the YAML
-	f, err := os.Create(filepath.Join(c.Path(), "backup.yaml"))
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	err = f.Chmod(0400)
-	if err != nil {
-		return err
-	}
-
-	err = shared.WriteAll(f, data)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
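
A minimal sketch of the backup.yaml round-trip, assuming gopkg.in/yaml.v2 and trimmed stand-in types (the real file embeds the full api.Instance, api.StoragePool and api.StorageVolume structs):

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    type miniBackupFile struct {
        Container map[string]string   `yaml:"container"`
        Snapshots []map[string]string `yaml:"snapshots"`
        Pool      map[string]string   `yaml:"pool"`
    }

    func main() {
        b := miniBackupFile{
            Container: map[string]string{"name": "c1"},
            Snapshots: []map[string]string{{"name": "snap0"}},
            Pool:      map[string]string{"name": "default"},
        }

        data, err := yaml.Marshal(&b)
        if err != nil {
            panic(err)
        }

        // Prints a YAML document with container, snapshots and pool sections.
        fmt.Print(string(data))
    }
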
-
-func (c *containerLXC) Update(args db.ContainerArgs, userRequested bool) error {
-	// Set sane defaults for unset keys
-	if args.Project == "" {
-		args.Project = "default"
-	}
-
-	if args.Architecture == 0 {
-		args.Architecture = c.architecture
-	}
-
-	if args.Config == nil {
-		args.Config = map[string]string{}
-	}
-
-	if args.Devices == nil {
-		args.Devices = config.Devices{}
-	}
-
-	if args.Profiles == nil {
-		args.Profiles = []string{}
-	}
-
-	// Validate the new config
-	err := containerValidConfig(c.state.OS, args.Config, false, false)
-	if err != nil {
-		return errors.Wrap(err, "Invalid config")
-	}
-
-	// Validate the new devices without using expanded devices validation (expensive checks disabled).
-	err = containerValidDevices(c.state, c.state.Cluster, c.Name(), args.Devices, false)
-	if err != nil {
-		return errors.Wrap(err, "Invalid devices")
-	}
-
-	// Validate the new profiles
-	profiles, err := c.state.Cluster.Profiles(args.Project)
-	if err != nil {
-		return errors.Wrap(err, "Failed to get profiles")
-	}
-
-	checkedProfiles := []string{}
-	for _, profile := range args.Profiles {
-		if !shared.StringInSlice(profile, profiles) {
-			return fmt.Errorf("Requested profile '%s' doesn't exist", profile)
-		}
-
-		if shared.StringInSlice(profile, checkedProfiles) {
-			return fmt.Errorf("Duplicate profile found in request")
-		}
-
-		checkedProfiles = append(checkedProfiles, profile)
-	}
-
-	// Validate the new architecture
-	if args.Architecture != 0 {
-		_, err = osarch.ArchitectureName(args.Architecture)
-		if err != nil {
-			return fmt.Errorf("Invalid architecture id: %s", err)
-		}
-	}
-
-	// Check that volatile and image keys weren't modified
-	if userRequested {
-		for k, v := range args.Config {
-			if strings.HasPrefix(k, "volatile.") && c.localConfig[k] != v {
-				return fmt.Errorf("Volatile keys are read-only")
-			}
-
-			if strings.HasPrefix(k, "image.") && c.localConfig[k] != v {
-				return fmt.Errorf("Image keys are read-only")
-			}
-		}
-
-		for k, v := range c.localConfig {
-			if strings.HasPrefix(k, "volatile.") && args.Config[k] != v {
-				return fmt.Errorf("Volatile keys are read-only")
-			}
-
-			if strings.HasPrefix(k, "image.") && args.Config[k] != v {
-				return fmt.Errorf("Image keys are read-only")
-			}
-		}
-	}
-
-	// Get a copy of the old configuration
-	oldDescription := c.Description()
-	oldArchitecture := 0
-	err = shared.DeepCopy(&c.architecture, &oldArchitecture)
-	if err != nil {
-		return err
-	}
-
-	oldEphemeral := false
-	err = shared.DeepCopy(&c.ephemeral, &oldEphemeral)
-	if err != nil {
-		return err
-	}
-
-	oldExpandedDevices := config.Devices{}
-	err = shared.DeepCopy(&c.expandedDevices, &oldExpandedDevices)
-	if err != nil {
-		return err
-	}
-
-	oldExpandedConfig := map[string]string{}
-	err = shared.DeepCopy(&c.expandedConfig, &oldExpandedConfig)
-	if err != nil {
-		return err
-	}
-
-	oldLocalDevices := config.Devices{}
-	err = shared.DeepCopy(&c.localDevices, &oldLocalDevices)
-	if err != nil {
-		return err
-	}
-
-	oldLocalConfig := map[string]string{}
-	err = shared.DeepCopy(&c.localConfig, &oldLocalConfig)
-	if err != nil {
-		return err
-	}
-
-	oldProfiles := []string{}
-	err = shared.DeepCopy(&c.profiles, &oldProfiles)
-	if err != nil {
-		return err
-	}
-
-	oldExpiryDate := c.expiryDate
-
-	// Define a function which reverts everything.  Defer this function
-	// so that it doesn't need to be explicitly called in every failing
-	// return path.  Track whether or not we want to undo the changes
-	// using a closure.
-	undoChanges := true
-	defer func() {
-		if undoChanges {
-			c.description = oldDescription
-			c.architecture = oldArchitecture
-			c.ephemeral = oldEphemeral
-			c.expandedConfig = oldExpandedConfig
-			c.expandedDevices = oldExpandedDevices
-			c.localConfig = oldLocalConfig
-			c.localDevices = oldLocalDevices
-			c.profiles = oldProfiles
-			c.expiryDate = oldExpiryDate
-			if c.c != nil {
-				c.c.Release()
-				c.c = nil
-			}
-			c.cConfig = false
-			c.initLXC(true)
-			deviceTaskSchedulerTrigger("container", c.name, "changed")
-		}
-	}()
-
-	// Apply the various changes
-	c.description = args.Description
-	c.architecture = args.Architecture
-	c.ephemeral = args.Ephemeral
-	c.localConfig = args.Config
-	c.localDevices = args.Devices
-	c.profiles = args.Profiles
-	c.expiryDate = args.ExpiryDate
-
-	// Expand the config and refresh the LXC config
-	err = c.expandConfig(nil)
-	if err != nil {
-		return errors.Wrap(err, "Expand config")
-	}
-
-	err = c.expandDevices(nil)
-	if err != nil {
-		return errors.Wrap(err, "Expand devices")
-	}
-
-	// Diff the configurations
-	changedConfig := []string{}
-	for key := range oldExpandedConfig {
-		if oldExpandedConfig[key] != c.expandedConfig[key] {
-			if !shared.StringInSlice(key, changedConfig) {
-				changedConfig = append(changedConfig, key)
-			}
-		}
-	}
-
-	for key := range c.expandedConfig {
-		if oldExpandedConfig[key] != c.expandedConfig[key] {
-			if !shared.StringInSlice(key, changedConfig) {
-				changedConfig = append(changedConfig, key)
-			}
-		}
-	}
-
-	// Diff the devices
-	removeDevices, addDevices, updateDevices, updateDiff := oldExpandedDevices.Update(c.expandedDevices, func(oldDevice config.Device, newDevice config.Device) []string {
-		// This function needs to return a list of fields that are excluded from differences
-		// between oldDevice and newDevice. As a result, as long as the devices are
-		// otherwise identical except for the fields returned here, the device is
-		// considered to be "updated" rather than "added & removed".
-		if oldDevice["type"] != newDevice["type"] || oldDevice["nictype"] != newDevice["nictype"] {
-			return []string{} // Device types aren't the same, so this cannot be an update.
-		}
-
-		d, err := device.New(c, c.state, "", newDevice, nil, nil)
-		if err != nil {
-			return []string{} // Couldn't create Device, so this cannot be an update.
-		}
-
-		_, updateFields := d.CanHotPlug()
-		return updateFields
-	})
-
-	// Do some validation of the config diff
-	err = containerValidConfig(c.state.OS, c.expandedConfig, false, true)
-	if err != nil {
-		return errors.Wrap(err, "Invalid expanded config")
-	}
-
-	// Do full expanded validation of the devices diff.
-	err = containerValidDevices(c.state, c.state.Cluster, c.Name(), c.expandedDevices, true)
-	if err != nil {
-		return errors.Wrap(err, "Invalid expanded devices")
-	}
-
-	// Run through initLXC to catch anything we missed
-	if c.c != nil {
-		c.c.Release()
-		c.c = nil
-	}
-	c.cConfig = false
-	err = c.initLXC(true)
-	if err != nil {
-		return errors.Wrap(err, "Initialize LXC")
-	}
-
-	// Initialize storage interface for the container.
-	err = c.initStorage()
-	if err != nil {
-		return errors.Wrap(err, "Initialize storage")
-	}
-
-	// If apparmor changed, re-validate the apparmor profile
-	if shared.StringInSlice("raw.apparmor", changedConfig) || shared.StringInSlice("security.nesting", changedConfig) {
-		err = AAParseProfile(c)
-		if err != nil {
-			return errors.Wrap(err, "Parse AppArmor profile")
-		}
-	}
-
-	if shared.StringInSlice("security.idmap.isolated", changedConfig) || shared.StringInSlice("security.idmap.base", changedConfig) || shared.StringInSlice("security.idmap.size", changedConfig) || shared.StringInSlice("raw.idmap", changedConfig) || shared.StringInSlice("security.privileged", changedConfig) {
-		var idmap *idmap.IdmapSet
-		base := int64(0)
-		if !c.IsPrivileged() {
-			// update the idmap
-			idmap, base, err = findIdmap(
-				c.state,
-				c.Name(),
-				c.expandedConfig["security.idmap.isolated"],
-				c.expandedConfig["security.idmap.base"],
-				c.expandedConfig["security.idmap.size"],
-				c.expandedConfig["raw.idmap"],
-			)
-			if err != nil {
-				return errors.Wrap(err, "Failed to get ID map")
-			}
-		}
-
-		var jsonIdmap string
-		if idmap != nil {
-			idmapBytes, err := json.Marshal(idmap.Idmap)
-			if err != nil {
-				return err
-			}
-			jsonIdmap = string(idmapBytes)
-		} else {
-			jsonIdmap = "[]"
-		}
-		c.localConfig["volatile.idmap.next"] = jsonIdmap
-		c.localConfig["volatile.idmap.base"] = fmt.Sprintf("%v", base)
-
-		// Invalidate the idmap cache
-		c.idmapset = nil
-	}
-
-	// Update MAAS
-	updateMAAS := false
-	for _, key := range []string{"maas.subnet.ipv4", "maas.subnet.ipv6", "ipv4.address", "ipv6.address"} {
-		if shared.StringInSlice(key, updateDiff) {
-			updateMAAS = true
-			break
-		}
-	}
-
-	if !c.IsSnapshot() && updateMAAS {
-		err = c.maasUpdate(oldExpandedDevices.CloneNative())
-		if err != nil {
-			return err
-		}
-	}
-
-	// Use the device interface to apply update changes.
-	err = c.updateDevices(removeDevices, addDevices, updateDevices, oldExpandedDevices)
-	if err != nil {
-		return err
-	}
-
-	// Apply the live changes
-	isRunning := c.IsRunning()
-	if isRunning {
-		// Live update the container config
-		for _, key := range changedConfig {
-			value := c.expandedConfig[key]
-
-			if key == "raw.apparmor" || key == "security.nesting" {
-				// Update the AppArmor profile
-				err = AALoadProfile(c)
-				if err != nil {
-					return err
-				}
-			} else if key == "security.devlxd" {
-				if value == "" || shared.IsTrue(value) {
-					err = c.insertMount(shared.VarPath("devlxd"), "/dev/lxd", "none", unix.MS_BIND, false)
-					if err != nil {
-						return err
-					}
-				} else if c.FileExists("/dev/lxd") == nil {
-					err = c.removeMount("/dev/lxd")
-					if err != nil {
-						return err
-					}
-
-					err = c.FileRemove("/dev/lxd")
-					if err != nil {
-						return err
-					}
-				}
-			} else if key == "linux.kernel_modules" && value != "" {
-				for _, module := range strings.Split(value, ",") {
-					module = strings.TrimPrefix(module, " ")
-					err := util.LoadModule(module)
-					if err != nil {
-						return fmt.Errorf("Failed to load kernel module '%s': %s", module, err)
-					}
-				}
-			} else if key == "limits.disk.priority" {
-				if !c.state.OS.CGroupBlkioController {
-					continue
-				}
-
-				priorityInt := 5
-				diskPriority := c.expandedConfig["limits.disk.priority"]
-				if diskPriority != "" {
-					priorityInt, err = strconv.Atoi(diskPriority)
-					if err != nil {
-						return err
-					}
-				}
-
-				// Minimum valid value is 10
-				priority := priorityInt * 100
-				if priority == 0 {
-					priority = 10
-				}
-
-				err = c.CGroupSet("blkio.weight", fmt.Sprintf("%d", priority))
-				if err != nil {
-					return err
-				}
-			} else if key == "limits.memory" || strings.HasPrefix(key, "limits.memory.") {
-				// Skip if no memory CGroup
-				if !c.state.OS.CGroupMemoryController {
-					continue
-				}
-
-				// Set the new memory limit
-				memory := c.expandedConfig["limits.memory"]
-				memoryEnforce := c.expandedConfig["limits.memory.enforce"]
-				memorySwap := c.expandedConfig["limits.memory.swap"]
-
-				// Parse memory
-				if memory == "" {
-					memory = "-1"
-				} else if strings.HasSuffix(memory, "%") {
-					percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64)
-					if err != nil {
-						return err
-					}
-
-					memoryTotal, err := shared.DeviceTotalMemory()
-					if err != nil {
-						return err
-					}
-
-					memory = fmt.Sprintf("%d", int64((memoryTotal/100)*percent))
-				} else {
-					valueInt, err := units.ParseByteSizeString(memory)
-					if err != nil {
-						return err
-					}
-					memory = fmt.Sprintf("%d", valueInt)
-				}
-
-				// Store the old values for revert
-				oldMemswLimit := ""
-				if c.state.OS.CGroupSwapAccounting {
-					oldMemswLimit, err = c.CGroupGet("memory.memsw.limit_in_bytes")
-					if err != nil {
-						oldMemswLimit = ""
-					}
-				}
-
-				oldLimit, err := c.CGroupGet("memory.limit_in_bytes")
-				if err != nil {
-					oldLimit = ""
-				}
-
-				oldSoftLimit, err := c.CGroupGet("memory.soft_limit_in_bytes")
-				if err != nil {
-					oldSoftLimit = ""
-				}
-
-				revertMemory := func() {
-					if oldSoftLimit != "" {
-						c.CGroupSet("memory.soft_limit_in_bytes", oldSoftLimit)
-					}
-
-					if oldLimit != "" {
-						c.CGroupSet("memory.limit_in_bytes", oldLimit)
-					}
-
-					if oldMemswLimit != "" {
-						c.CGroupSet("memory.memsw.limit_in_bytes", oldMemswLimit)
-					}
-				}
-
-				// Reset everything
-				if c.state.OS.CGroupSwapAccounting {
-					err = c.CGroupSet("memory.memsw.limit_in_bytes", "-1")
-					if err != nil {
-						revertMemory()
-						return err
-					}
-				}
-
-				err = c.CGroupSet("memory.limit_in_bytes", "-1")
-				if err != nil {
-					revertMemory()
-					return err
-				}
-
-				err = c.CGroupSet("memory.soft_limit_in_bytes", "-1")
-				if err != nil {
-					revertMemory()
-					return err
-				}
-
-				// Set the new values
-				if memoryEnforce == "soft" {
-					// Set new limit
-					err = c.CGroupSet("memory.soft_limit_in_bytes", memory)
-					if err != nil {
-						revertMemory()
-						return err
-					}
-				} else {
-					if c.state.OS.CGroupSwapAccounting && (memorySwap == "" || shared.IsTrue(memorySwap)) {
-						err = c.CGroupSet("memory.limit_in_bytes", memory)
-						if err != nil {
-							revertMemory()
-							return err
-						}
-
-						err = c.CGroupSet("memory.memsw.limit_in_bytes", memory)
-						if err != nil {
-							revertMemory()
-							return err
-						}
-					} else {
-						err = c.CGroupSet("memory.limit_in_bytes", memory)
-						if err != nil {
-							revertMemory()
-							return err
-						}
-					}
-
-					// Set soft limit to value 10% less than hard limit
-					valueInt, err := strconv.ParseInt(memory, 10, 64)
-					if err != nil {
-						revertMemory()
-						return err
-					}
-
-					err = c.CGroupSet("memory.soft_limit_in_bytes", fmt.Sprintf("%.0f", float64(valueInt)*0.9))
-					if err != nil {
-						revertMemory()
-						return err
-					}
-				}
-
-				// Configure the swappiness
-				if key == "limits.memory.swap" || key == "limits.memory.swap.priority" {
-					memorySwap := c.expandedConfig["limits.memory.swap"]
-					memorySwapPriority := c.expandedConfig["limits.memory.swap.priority"]
-					if memorySwap != "" && !shared.IsTrue(memorySwap) {
-						err = c.CGroupSet("memory.swappiness", "0")
-						if err != nil {
-							return err
-						}
-					} else {
-						priority := 0
-						if memorySwapPriority != "" {
-							priority, err = strconv.Atoi(memorySwapPriority)
-							if err != nil {
-								return err
-							}
-						}
-
-						err = c.CGroupSet("memory.swappiness", fmt.Sprintf("%d", 60-10+priority))
-						if err != nil {
-							return err
-						}
-					}
-				}
-			} else if key == "limits.network.priority" {
-				err := c.setNetworkPriority()
-				if err != nil {
-					return err
-				}
-			} else if key == "limits.cpu" {
-				// Trigger a scheduler re-run
-				deviceTaskSchedulerTrigger("container", c.name, "changed")
-			} else if key == "limits.cpu.priority" || key == "limits.cpu.allowance" {
-				// Skip if no cpu CGroup
-				if !c.state.OS.CGroupCPUController {
-					continue
-				}
-
-				// Apply new CPU limits
-				cpuShares, cpuCfsQuota, cpuCfsPeriod, err := deviceParseCPU(c.expandedConfig["limits.cpu.allowance"], c.expandedConfig["limits.cpu.priority"])
-				if err != nil {
-					return err
-				}
-
-				err = c.CGroupSet("cpu.shares", cpuShares)
-				if err != nil {
-					return err
-				}
-
-				err = c.CGroupSet("cpu.cfs_period_us", cpuCfsPeriod)
-				if err != nil {
-					return err
-				}
-
-				err = c.CGroupSet("cpu.cfs_quota_us", cpuCfsQuota)
-				if err != nil {
-					return err
-				}
-			} else if key == "limits.processes" {
-				if !c.state.OS.CGroupPidsController {
-					continue
-				}
-
-				if value == "" {
-					err = c.CGroupSet("pids.max", "max")
-					if err != nil {
-						return err
-					}
-				} else {
-					valueInt, err := strconv.ParseInt(value, 10, 64)
-					if err != nil {
-						return err
-					}
-
-					err = c.CGroupSet("pids.max", fmt.Sprintf("%d", valueInt))
-					if err != nil {
-						return err
-					}
-				}
-			}
-		}
-	}
-
-	// Finally, apply the changes to the database
-	err = query.Retry(func() error {
-		tx, err := c.state.Cluster.Begin()
-		if err != nil {
-			return err
-		}
-
-		// Snapshots should update only their descriptions and expiry date.
-		if c.IsSnapshot() {
-			err = db.InstanceSnapshotUpdate(tx, c.id, c.description, c.expiryDate)
-			if err != nil {
-				tx.Rollback()
-				return errors.Wrap(err, "Snapshot update")
-			}
-		} else {
-			err = db.ContainerConfigClear(tx, c.id)
-			if err != nil {
-				tx.Rollback()
-				return err
-			}
-			err = db.ContainerConfigInsert(tx, c.id, c.localConfig)
-			if err != nil {
-				tx.Rollback()
-				return errors.Wrap(err, "Config insert")
-			}
-
-			err = db.ContainerProfilesInsert(tx, c.id, c.project, c.profiles)
-			if err != nil {
-				tx.Rollback()
-				return errors.Wrap(err, "Profiles insert")
-			}
-
-			err = db.DevicesAdd(tx, "instance", int64(c.id), c.localDevices)
-			if err != nil {
-				tx.Rollback()
-				return errors.Wrap(err, "Device add")
-			}
-
-			err = db.ContainerUpdate(tx, c.id, c.description, c.architecture, c.ephemeral, c.expiryDate)
-			if err != nil {
-				tx.Rollback()
-				return errors.Wrap(err, "Container update")
-			}
-
-		}
-
-		if err := db.TxCommit(tx); err != nil {
-			return err
-		}
-		return nil
-	})
-	if err != nil {
-		return errors.Wrap(err, "Failed to update database")
-	}
-
-	/* we can call Update in some cases when the directory doesn't exist
-	 * yet before container creation; this is okay, because at the end of
-	 * container creation we write the backup file, so let's not worry about
-	 * ENOENT. */
-	if c.storage.ContainerStorageReady(c) {
-		err := writeBackupFile(c)
-		if err != nil && !os.IsNotExist(err) {
-			return errors.Wrap(err, "Failed to write backup file")
-		}
-	}
-
-	// Send devlxd notifications
-	if isRunning {
-		// Config changes (only for user.* keys)
-		for _, key := range changedConfig {
-			if !strings.HasPrefix(key, "user.") {
-				continue
-			}
-
-			msg := map[string]string{
-				"key":       key,
-				"old_value": oldExpandedConfig[key],
-				"value":     c.expandedConfig[key],
-			}
-
-			err = devlxdEventSend(c, "config", msg)
-			if err != nil {
-				return err
-			}
-		}
-
-		// Device changes
-		for k, m := range removeDevices {
-			msg := map[string]interface{}{
-				"action": "removed",
-				"name":   k,
-				"config": m,
-			}
-
-			err = devlxdEventSend(c, "device", msg)
-			if err != nil {
-				return err
-			}
-		}
-
-		for k, m := range updateDevices {
-			msg := map[string]interface{}{
-				"action": "updated",
-				"name":   k,
-				"config": m,
-			}
-
-			err = devlxdEventSend(c, "device", msg)
-			if err != nil {
-				return err
-			}
-		}
-
-		for k, m := range addDevices {
-			msg := map[string]interface{}{
-				"action": "added",
-				"name":   k,
-				"config": m,
-			}
-
-			err = devlxdEventSend(c, "device", msg)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Success, update the closure to mark that the changes should be kept.
-	undoChanges = false
-
-	var endpoint string
-
-	if c.IsSnapshot() {
-		cName, sName, _ := shared.ContainerGetParentAndSnapshotName(c.name)
-		endpoint = fmt.Sprintf("/1.0/containers/%s/snapshots/%s", cName, sName)
-	} else {
-		endpoint = fmt.Sprintf("/1.0/containers/%s", c.name)
-	}
-
-	eventSendLifecycle(c.project, "container-updated", endpoint, nil)
-
-	return nil
-}
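
The two-pass diff over the old and new expanded config above generalises to a small helper; a stdlib sketch (diffConfig is a hypothetical name):

    package sketch

    // diffConfig returns the keys whose values differ between old and new,
    // scanning both maps so added, removed and modified keys are all caught.
    func diffConfig(old, new map[string]string) []string {
        seen := map[string]bool{}
        changed := []string{}

        collect := func(m map[string]string) {
            for key := range m {
                if old[key] != new[key] && !seen[key] {
                    seen[key] = true
                    changed = append(changed, key)
                }
            }
        }

        collect(old)
        collect(new)

        return changed
    }
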
-
-func (c *containerLXC) updateDevices(removeDevices config.Devices, addDevices config.Devices, updateDevices config.Devices, oldExpandedDevices config.Devices) error {
-	isRunning := c.IsRunning()
-
-	// Remove devices in reverse order to how they were added.
-	for _, dev := range removeDevices.Reversed() {
-		if isRunning {
-			err := c.deviceStop(dev.Name, dev.Config, "")
-			if err == device.ErrUnsupportedDevType {
-				continue // No point in trying to remove device below.
-			} else if err != nil {
-				return errors.Wrapf(err, "Failed to stop device '%s'", dev.Name)
-			}
-		}
-
-		err := c.deviceRemove(dev.Name, dev.Config)
-		if err != nil && err != device.ErrUnsupportedDevType {
-			return errors.Wrapf(err, "Failed to remove device '%s'", dev.Name)
-		}
-
-		// Check whether we are about to add the same device back with updated config and
-		// if not, or if the device type has changed, then remove all volatile keys for
-		// this device (as it's an actual removal or a device type change).
-		err = c.deviceResetVolatile(dev.Name, dev.Config, addDevices[dev.Name])
-		if err != nil {
-			return errors.Wrapf(err, "Failed to reset volatile data for device '%s'", dev.Name)
-		}
-	}
-
-	// Add devices in sorted order, this ensures that device mounts are added in path order.
-	for _, dev := range addDevices.Sorted() {
-		err := c.deviceAdd(dev.Name, dev.Config)
-		if err == device.ErrUnsupportedDevType {
-			continue // No point in trying to start device below.
-		} else if err != nil {
-			return errors.Wrapf(err, "Failed to add device '%s'", dev.Name)
-		}
-
-		if isRunning {
-			_, err := c.deviceStart(dev.Name, dev.Config, isRunning)
-			if err != nil && err != device.ErrUnsupportedDevType {
-				return errors.Wrapf(err, "Failed to start device '%s'", dev.Name)
-			}
-		}
-	}
-
-	for _, dev := range updateDevices.Sorted() {
-		err := c.deviceUpdate(dev.Name, dev.Config, oldExpandedDevices, isRunning)
-		if err != nil && err != device.ErrUnsupportedDevType {
-			return errors.Wrapf(err, "Failed to update device '%s'", dev.Name)
-		}
-	}
-
-	return nil
-}
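
Why updateDevices removes in reverse order and adds in sorted order: device mounts nest by path, so parents must be mounted before children and unmounted after them. A toy illustration with plain slices (not LXD's actual config.Devices type):

package main

import (
	"fmt"
	"sort"
)

type devEntry struct{ Name, Path string }

// sortedByPath orders devices by mount path, parents before children.
func sortedByPath(devs []devEntry) []devEntry {
	out := append([]devEntry(nil), devs...)
	sort.Slice(out, func(i, j int) bool { return out[i].Path < out[j].Path })
	return out
}

// reversed returns the devices in the opposite order, for teardown.
func reversed(devs []devEntry) []devEntry {
	out := make([]devEntry, 0, len(devs))
	for i := len(devs) - 1; i >= 0; i-- {
		out = append(out, devs[i])
	}
	return out
}

func main() {
	add := sortedByPath([]devEntry{{"data", "/a/b"}, {"root", "/a"}})
	fmt.Println("add order:   ", add)           // root (/a) before data (/a/b)
	fmt.Println("remove order:", reversed(add)) // data (/a/b) before root (/a)
}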
-
-func (c *containerLXC) Export(w io.Writer, properties map[string]string) error {
-	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate}
-
-	if c.IsRunning() {
-		return fmt.Errorf("Cannot export a running container as an image")
-	}
-
-	logger.Info("Exporting container", ctxMap)
-
-	// Start the storage
-	ourStart, err := c.StorageStart()
-	if err != nil {
-		logger.Error("Failed exporting container", ctxMap)
-		return err
-	}
-	if ourStart {
-		defer c.StorageStop()
-	}
-
-	// Unshift the container
-	idmap, err := c.DiskIdmap()
-	if err != nil {
-		logger.Error("Failed exporting container", ctxMap)
-		return err
-	}
-
-	if idmap != nil {
-		if !c.IsSnapshot() && shared.IsTrue(c.expandedConfig["security.protection.shift"]) {
-			return fmt.Errorf("Container is protected against filesystem shifting")
-		}
-
-		var err error
-
-		if c.Storage().GetStorageType() == storageTypeZfs {
-			err = idmap.UnshiftRootfs(c.RootfsPath(), zfsIdmapSetSkipper)
-		} else if c.Storage().GetStorageType() == storageTypeBtrfs {
-			err = UnshiftBtrfsRootfs(c.RootfsPath(), idmap)
-		} else {
-			err = idmap.UnshiftRootfs(c.RootfsPath(), nil)
-		}
-		if err != nil {
-			logger.Error("Failed exporting container", ctxMap)
-			return err
-		}
-
-		if c.Storage().GetStorageType() == storageTypeZfs {
-			defer idmap.ShiftRootfs(c.RootfsPath(), zfsIdmapSetSkipper)
-		} else if c.Storage().GetStorageType() == storageTypeBtrfs {
-			defer ShiftBtrfsRootfs(c.RootfsPath(), idmap)
-		} else {
-			defer idmap.ShiftRootfs(c.RootfsPath(), nil)
-		}
-	}
-
-	// Create the tarball
-	ctw := containerwriter.NewContainerTarWriter(w, idmap)
-
-	// Keep track of the first path we saw for each path with nlink>1
-	cDir := c.Path()
-
-	// Path inside the tar image is the pathname starting after cDir
-	offset := len(cDir) + 1
-
-	writeToTar := func(path string, fi os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		err = ctw.WriteFile(offset, path, fi)
-		if err != nil {
-			logger.Debugf("Error tarring up %s: %s", path, err)
-			return err
-		}
-		return nil
-	}
-
-	// Look for metadata.yaml
-	fnam := filepath.Join(cDir, "metadata.yaml")
-	if !shared.PathExists(fnam) {
-		// Generate a new metadata.yaml
-		tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
-		if err != nil {
-			ctw.Close()
-			logger.Error("Failed exporting container", ctxMap)
-			return err
-		}
-		defer os.RemoveAll(tempDir)
-
-		// Get the container's architecture
-		var arch string
-		if c.IsSnapshot() {
-			parentName, _, _ := shared.ContainerGetParentAndSnapshotName(c.name)
-			parent, err := instanceLoadByProjectAndName(c.state, c.project, parentName)
-			if err != nil {
-				ctw.Close()
-				logger.Error("Failed exporting container", ctxMap)
-				return err
-			}
-
-			arch, _ = osarch.ArchitectureName(parent.Architecture())
-		} else {
-			arch, _ = osarch.ArchitectureName(c.architecture)
-		}
-
-		if arch == "" {
-			arch, err = osarch.ArchitectureName(c.state.OS.Architectures[0])
-			if err != nil {
-				logger.Error("Failed exporting container", ctxMap)
-				return err
-			}
-		}
-
-		// Fill in the metadata
-		meta := api.ImageMetadata{}
-		meta.Architecture = arch
-		meta.CreationDate = time.Now().UTC().Unix()
-		meta.Properties = properties
-
-		data, err := yaml.Marshal(&meta)
-		if err != nil {
-			ctw.Close()
-			logger.Error("Failed exporting container", ctxMap)
-			return err
-		}
-
-		// Write the actual file
-		fnam = filepath.Join(tempDir, "metadata.yaml")
-		err = ioutil.WriteFile(fnam, data, 0644)
-		if err != nil {
-			ctw.Close()
-			logger.Error("Failed exporting container", ctxMap)
-			return err
-		}
-
-		fi, err := os.Lstat(fnam)
-		if err != nil {
-			ctw.Close()
-			logger.Error("Failed exporting container", ctxMap)
-			return err
-		}
-
-		tmpOffset := len(path.Dir(fnam)) + 1
-		if err := ctw.WriteFile(tmpOffset, fnam, fi); err != nil {
-			ctw.Close()
-			logger.Debugf("Error writing to tarfile: %s", err)
-			logger.Error("Failed exporting container", ctxMap)
-			return err
-		}
-	} else {
-		if properties != nil {
-			// Parse the metadata
-			content, err := ioutil.ReadFile(fnam)
-			if err != nil {
-				ctw.Close()
-				logger.Error("Failed exporting container", ctxMap)
-				return err
-			}
-
-			metadata := new(api.ImageMetadata)
-			err = yaml.Unmarshal(content, &metadata)
-			if err != nil {
-				ctw.Close()
-				logger.Error("Failed exporting container", ctxMap)
-				return err
-			}
-			metadata.Properties = properties
-
-			// Generate a new metadata.yaml
-			tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
-			if err != nil {
-				ctw.Close()
-				logger.Error("Failed exporting container", ctxMap)
-				return err
-			}
-			defer os.RemoveAll(tempDir)
-
-			data, err := yaml.Marshal(&metadata)
-			if err != nil {
-				ctw.Close()
-				logger.Error("Failed exporting container", ctxMap)
-				return err
-			}
-
-			// Write the actual file
-			fnam = filepath.Join(tempDir, "metadata.yaml")
-			err = ioutil.WriteFile(fnam, data, 0644)
-			if err != nil {
-				ctw.Close()
-				logger.Error("Failed exporting container", ctxMap)
-				return err
-			}
-		}
-
-		// Include metadata.yaml in the tarball
-		fi, err := os.Lstat(fnam)
-		if err != nil {
-			ctw.Close()
-			logger.Debugf("Error statting %s during export", fnam)
-			logger.Error("Failed exporting container", ctxMap)
-			return err
-		}
-
-		if properties != nil {
-			tmpOffset := len(path.Dir(fnam)) + 1
-			err = ctw.WriteFile(tmpOffset, fnam, fi)
-		} else {
-			err = ctw.WriteFile(offset, fnam, fi)
-		}
-		if err != nil {
-			ctw.Close()
-			logger.Debugf("Error writing to tarfile: %s", err)
-			logger.Error("Failed exporting container", ctxMap)
-			return err
-		}
-	}
-
-	// Include all the rootfs files
-	fnam = c.RootfsPath()
-	err = filepath.Walk(fnam, writeToTar)
-	if err != nil {
-		logger.Error("Failed exporting container", ctxMap)
-		return err
-	}
-
-	// Include all the templates
-	fnam = c.TemplatesPath()
-	if shared.PathExists(fnam) {
-		err = filepath.Walk(fnam, writeToTar)
-		if err != nil {
-			logger.Error("Failed exporting container", ctxMap)
-			return err
-		}
-	}
-
-	err = ctw.Close()
-	if err != nil {
-		logger.Error("Failed exporting container", ctxMap)
-		return err
-	}
-
-	logger.Info("Exported container", ctxMap)
-	return nil
-}
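
Export above delegates to containerwriter, which also handles idmap unshifting and hard links. A rough standalone sketch of just the walk-and-strip-offset part, using only the standard library and covering regular files only:

package main

import (
	"archive/tar"
	"io"
	"os"
	"path/filepath"
)

// tarDir streams the regular files under dir into a tar archive,
// stripping the dir prefix so paths inside the tarball are relative.
func tarDir(w io.Writer, dir string) error {
	tw := tar.NewWriter(w)
	offset := len(dir) + 1 // path inside the tarball starts after dir

	err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !fi.Mode().IsRegular() || len(path) <= offset {
			return nil // skip dir itself and non-regular files for brevity
		}

		hdr, err := tar.FileInfoHeader(fi, "")
		if err != nil {
			return err
		}
		hdr.Name = path[offset:]
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}

		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
	if err != nil {
		return err
	}
	return tw.Close()
}

func main() {
	f, err := os.Create("/tmp/rootfs.tar") // hypothetical output path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := tarDir(f, "/tmp/rootfs"); err != nil { // hypothetical dir
		panic(err)
	}
}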
-
-func collectCRIULogFile(c container, imagesDir string, function string, method string) error {
-	t := time.Now().Format(time.RFC3339)
-	newPath := shared.LogPath(c.Name(), fmt.Sprintf("%s_%s_%s.log", function, method, t))
-	return shared.FileCopy(filepath.Join(imagesDir, fmt.Sprintf("%s.log", method)), newPath)
-}
-
-func getCRIULogErrors(imagesDir string, method string) (string, error) {
-	f, err := os.Open(path.Join(imagesDir, fmt.Sprintf("%s.log", method)))
-	if err != nil {
-		return "", err
-	}
-
-	defer f.Close()
-
-	scanner := bufio.NewScanner(f)
-	ret := []string{}
-	for scanner.Scan() {
-		line := scanner.Text()
-		if strings.Contains(line, "Error") || strings.Contains(line, "Warn") {
-			ret = append(ret, line)
-		}
-	}
-
-	return strings.Join(ret, "\n"), nil
-}
-
-type CriuMigrationArgs struct {
-	cmd          uint
-	stateDir     string
-	function     string
-	stop         bool
-	actionScript bool
-	dumpDir      string
-	preDumpDir   string
-	features     lxc.CriuFeatures
-}
-
-func (c *containerLXC) Migrate(args *CriuMigrationArgs) error {
-	ctxMap := log.Ctx{
-		"project":      c.project,
-		"name":         c.name,
-		"created":      c.creationDate,
-		"ephemeral":    c.ephemeral,
-		"used":         c.lastUsedDate,
-		"statedir":     args.stateDir,
-		"actionscript": args.actionScript,
-		"predumpdir":   args.preDumpDir,
-		"features":     args.features,
-		"stop":         args.stop}
-
-	_, err := exec.LookPath("criu")
-	if err != nil {
-		return fmt.Errorf("Unable to perform container live migration. CRIU isn't installed")
-	}
-
-	logger.Info("Migrating container", ctxMap)
-
-	// Initialize storage interface for the container.
-	err = c.initStorage()
-	if err != nil {
-		return err
-	}
-
-	prettyCmd := ""
-	switch args.cmd {
-	case lxc.MIGRATE_PRE_DUMP:
-		prettyCmd = "pre-dump"
-	case lxc.MIGRATE_DUMP:
-		prettyCmd = "dump"
-	case lxc.MIGRATE_RESTORE:
-		prettyCmd = "restore"
-	case lxc.MIGRATE_FEATURE_CHECK:
-		prettyCmd = "feature-check"
-	default:
-		prettyCmd = "unknown"
-		logger.Warn("Unknown migrate call", log.Ctx{"cmd": args.cmd})
-	}
-
-	preservesInodes := c.storage.PreservesInodes()
-	/* This feature was only added in 2.0.1, let's not ask for it
-	 * before then or migrations will fail.
-	 */
-	if !util.RuntimeLiblxcVersionAtLeast(2, 0, 1) {
-		preservesInodes = false
-	}
-
-	finalStateDir := args.stateDir
-	var migrateErr error
-
-	/* For restore, we need an extra fork so that we daemonize monitor
-	 * instead of having it be a child of LXD, so let's hijack the command
-	 * here and do the extra fork.
-	 */
-	if args.cmd == lxc.MIGRATE_RESTORE {
-		// Run the shared start
-		_, postStartHooks, err := c.startCommon()
-		if err != nil {
-			return err
-		}
-
-		/*
-		 * For unprivileged containers we need to shift the
-		 * perms on the checkpoint images so that they can be
-		 * opened by the process after it is in its user
-		 * namespace.
-		 */
-		idmapset, err := c.CurrentIdmap()
-		if err != nil {
-			return err
-		}
-
-		if idmapset != nil {
-			ourStart, err := c.StorageStart()
-			if err != nil {
-				return err
-			}
-
-			if c.Storage().GetStorageType() == storageTypeZfs {
-				err = idmapset.ShiftRootfs(args.stateDir, zfsIdmapSetSkipper)
-			} else if c.Storage().GetStorageType() == storageTypeBtrfs {
-				err = ShiftBtrfsRootfs(args.stateDir, idmapset)
-			} else {
-				err = idmapset.ShiftRootfs(args.stateDir, nil)
-			}
-			if ourStart {
-				_, err2 := c.StorageStop()
-				if err == nil {
-					err = err2
-				}
-			}
-
-			if err != nil {
-				return err
-			}
-		}
-
-		configPath := filepath.Join(c.LogPath(), "lxc.conf")
-
-		if args.dumpDir != "" {
-			finalStateDir = fmt.Sprintf("%s/%s", args.stateDir, args.dumpDir)
-		}
-
-		_, migrateErr = shared.RunCommand(
-			c.state.OS.ExecPath,
-			"forkmigrate",
-			c.name,
-			c.state.OS.LxcPath,
-			configPath,
-			finalStateDir,
-			fmt.Sprintf("%v", preservesInodes))
-
-		if migrateErr == nil {
-			// Run any post start hooks.
-			err := c.runHooks(postStartHooks)
-			if err != nil {
-				// Attempt to stop container.
-				c.Stop(false)
-				return err
-			}
-		}
-	} else if args.cmd == lxc.MIGRATE_FEATURE_CHECK {
-		err := c.initLXC(true)
-		if err != nil {
-			return err
-		}
-
-		opts := lxc.MigrateOptions{
-			FeaturesToCheck: args.features,
-		}
-		migrateErr = c.c.Migrate(args.cmd, opts)
-		if migrateErr != nil {
-			logger.Info("CRIU feature check failed", ctxMap)
-			return migrateErr
-		}
-		return nil
-	} else {
-		err := c.initLXC(true)
-		if err != nil {
-			return err
-		}
-
-		script := ""
-		if args.actionScript {
-			script = filepath.Join(args.stateDir, "action.sh")
-		}
-
-		if args.dumpDir != "" {
-			finalStateDir = fmt.Sprintf("%s/%s", args.stateDir, args.dumpDir)
-		}
-
-		// TODO: make this configurable? Ultimately I think we don't
-		// want to do that; what we really want to do is have "modes"
-		// of criu operation where one is "make this succeed" and the
-		// other is "make this fast". Anyway, for now, let's choose a
-		// really big size so it almost always succeeds, even if it is
-		// slow.
-		ghostLimit := uint64(256 * 1024 * 1024)
-
-		opts := lxc.MigrateOptions{
-			Stop:            args.stop,
-			Directory:       finalStateDir,
-			Verbose:         true,
-			PreservesInodes: preservesInodes,
-			ActionScript:    script,
-			GhostLimit:      ghostLimit,
-		}
-		if args.preDumpDir != "" {
-			opts.PredumpDir = fmt.Sprintf("../%s", args.preDumpDir)
-		}
-
-		if !c.IsRunning() {
-			// otherwise the migration will needlessly fail
-			args.stop = false
-		}
-
-		migrateErr = c.c.Migrate(args.cmd, opts)
-	}
-
-	collectErr := collectCRIULogFile(c, finalStateDir, args.function, prettyCmd)
-	if collectErr != nil {
-		logger.Error("Error collecting checkpoint log file", log.Ctx{"err": collectErr})
-	}
-
-	if migrateErr != nil {
-		log, err2 := getCRIULogErrors(finalStateDir, prettyCmd)
-		if err2 == nil {
-			logger.Info("Failed migrating container", ctxMap)
-			migrateErr = fmt.Errorf("%s %s failed\n%s", args.function, prettyCmd, log)
-		}
-
-		return migrateErr
-	}
-
-	logger.Info("Migrated container", ctxMap)
-
-	return nil
-}
-
-func (c *containerLXC) TemplateApply(trigger string) error {
-	// "create" and "copy" are deferred until next start
-	if shared.StringInSlice(trigger, []string{"create", "copy"}) {
-		// The two events are mutually exclusive so only keep the last one
-		err := c.VolatileSet(map[string]string{"volatile.apply_template": trigger})
-		if err != nil {
-			return errors.Wrap(err, "Failed to set apply_template volatile key")
-		}
-
-		return nil
-	}
-
-	return c.templateApplyNow(trigger)
-}
-
-func (c *containerLXC) templateApplyNow(trigger string) error {
-	// If there's no metadata, just return
-	fname := filepath.Join(c.Path(), "metadata.yaml")
-	if !shared.PathExists(fname) {
-		return nil
-	}
-
-	// Parse the metadata
-	content, err := ioutil.ReadFile(fname)
-	if err != nil {
-		return errors.Wrap(err, "Failed to read metadata")
-	}
-
-	metadata := new(api.ImageMetadata)
-	err = yaml.Unmarshal(content, &metadata)
-	if err != nil {
-		return errors.Wrapf(err, "Could not parse %s", fname)
-	}
-
-	// Find rootUid and rootGid
-	idmapset, err := c.DiskIdmap()
-	if err != nil {
-		return errors.Wrap(err, "Failed to get disk idmap")
-	}
-
-	rootUid := int64(0)
-	rootGid := int64(0)
-
-	// Get the right uid and gid for the container
-	if idmapset != nil {
-		rootUid, rootGid = idmapset.ShiftIntoNs(0, 0)
-	}
-
-	// Figure out the container architecture
-	arch, err := osarch.ArchitectureName(c.architecture)
-	if err != nil {
-		arch, err = osarch.ArchitectureName(c.state.OS.Architectures[0])
-		if err != nil {
-			return errors.Wrap(err, "Failed to detect system architecture")
-		}
-	}
-
-	// Generate the container metadata
-	containerMeta := make(map[string]string)
-	containerMeta["name"] = c.name
-	containerMeta["architecture"] = arch
-
-	if c.ephemeral {
-		containerMeta["ephemeral"] = "true"
-	} else {
-		containerMeta["ephemeral"] = "false"
-	}
-
-	if c.IsPrivileged() {
-		containerMeta["privileged"] = "true"
-	} else {
-		containerMeta["privileged"] = "false"
-	}
-
-	// Go through the templates
-	for tplPath, tpl := range metadata.Templates {
-		var w *os.File
-
-		// Check if the template should be applied now
-		found := false
-		for _, tplTrigger := range tpl.When {
-			if tplTrigger == trigger {
-				found = true
-				break
-			}
-		}
-
-		if !found {
-			continue
-		}
-
-		// Open the file to template, create if needed
-		fullpath := filepath.Join(c.RootfsPath(), strings.TrimLeft(tplPath, "/"))
-		if shared.PathExists(fullpath) {
-			if tpl.CreateOnly {
-				continue
-			}
-
-			// Overwrite the existing file (os.Create truncates)
-			w, err = os.Create(fullpath)
-			if err != nil {
-				return errors.Wrap(err, "Failed to create template file")
-			}
-		} else {
-			// Create the directories leading to the file
-			err = shared.MkdirAllOwner(path.Dir(fullpath), 0755, int(rootUid), int(rootGid))
-			if err != nil {
-				return err
-			}
-
-			// Create the file itself
-			w, err = os.Create(fullpath)
-			if err != nil {
-				return err
-			}
-
-			// Fix ownership and mode
-			w.Chown(int(rootUid), int(rootGid))
-			w.Chmod(0644)
-		}
-		defer w.Close()
-
-		// Read the template
-		tplString, err := ioutil.ReadFile(filepath.Join(c.TemplatesPath(), tpl.Template))
-		if err != nil {
-			return errors.Wrap(err, "Failed to read template file")
-		}
-
-		// Restrict filesystem access to within the container's rootfs
-		tplSet := pongo2.NewSet(fmt.Sprintf("%s-%s", c.name, tpl.Template), template.ChrootLoader{Path: c.RootfsPath()})
-
-		tplRender, err := tplSet.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
-		if err != nil {
-			return errors.Wrap(err, "Failed to render template")
-		}
-
-		configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
-			val, ok := c.expandedConfig[confKey.String()]
-			if !ok {
-				return confDefault
-			}
-
-			return pongo2.AsValue(strings.TrimRight(val, "\r\n"))
-		}
-
-		// Render the template
-		err = tplRender.ExecuteWriter(pongo2.Context{"trigger": trigger,
-			"path":       tplPath,
-			"container":  containerMeta,
-			"config":     c.expandedConfig,
-			"devices":    c.expandedDevices,
-			"properties": tpl.Properties,
-			"config_get": configGet}, w)
-		if err != nil {
-			return errors.Wrap(err, "Failed to execute template")
-		}
-	}
-
-	return nil
-}
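
templateApplyNow exposes a config_get function to the templates. A minimal runnable sketch of that pongo2 mechanism, with hypothetical keys and without the chroot loader used above:

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// Hypothetical expanded config of a container.
	config := map[string]string{"user.greeting": "hello"}

	// Same shape as the config_get helper above: fall back to the
	// template-supplied default when the key is missing.
	configGet := func(key, def *pongo2.Value) *pongo2.Value {
		val, ok := config[key.String()]
		if !ok {
			return def
		}
		return pongo2.AsValue(val)
	}

	tpl, err := pongo2.FromString(`{{ config_get("user.greeting", "hi") }}, {{ name }}`)
	if err != nil {
		panic(err)
	}

	out, err := tpl.Execute(pongo2.Context{"name": "c1", "config_get": configGet})
	if err != nil {
		panic(err)
	}

	fmt.Println(out) // hello, c1
}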
-
-func (c *containerLXC) FileExists(path string) error {
-	// Setup container storage if needed
-	var ourStart bool
-	var err error
-	if !c.IsRunning() {
-		ourStart, err = c.StorageStart()
-		if err != nil {
-			return err
-		}
-	}
-
-	// Check if the file exists in the container
-	_, stderr, err := shared.RunCommandSplit(
-		nil,
-		c.state.OS.ExecPath,
-		"forkfile",
-		"exists",
-		c.RootfsPath(),
-		fmt.Sprintf("%d", c.InitPID()),
-		path,
-	)
-
-	// Tear down container storage if needed
-	if !c.IsRunning() && ourStart {
-		_, err := c.StorageStop()
-		if err != nil {
-			return err
-		}
-	}
-
-	// Process the forkfile exists response
-	if stderr != "" {
-		if strings.HasPrefix(stderr, "error:") {
-			return fmt.Errorf(strings.TrimPrefix(strings.TrimSuffix(stderr, "\n"), "error: "))
-		}
-
-		for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
-			logger.Debugf("forkcheckfile: %s", line)
-		}
-	}
-
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (c *containerLXC) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error) {
-	var ourStart bool
-	var err error
-	// Setup container storage if needed
-	if !c.IsRunning() {
-		ourStart, err = c.StorageStart()
-		if err != nil {
-			return -1, -1, 0, "", nil, err
-		}
-	}
-
-	// Get the file from the container
-	_, stderr, err := shared.RunCommandSplit(
-		nil,
-		c.state.OS.ExecPath,
-		"forkfile",
-		"pull",
-		c.RootfsPath(),
-		fmt.Sprintf("%d", c.InitPID()),
-		srcpath,
-		dstpath,
-	)
-
-	// Tear down container storage if needed
-	if !c.IsRunning() && ourStart {
-		_, err := c.StorageStop()
-		if err != nil {
-			return -1, -1, 0, "", nil, err
-		}
-	}
-
-	uid := int64(-1)
-	gid := int64(-1)
-	mode := -1
-	type_ := "unknown"
-	var dirEnts []string
-	var errStr string
-
-	// Process the forkfile pull response
-	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
-		if line == "" {
-			continue
-		}
-
-		// Extract errors
-		if strings.HasPrefix(line, "error: ") {
-			errStr = strings.TrimPrefix(line, "error: ")
-			continue
-		}
-
-		if strings.HasPrefix(line, "errno: ") {
-			errno := strings.TrimPrefix(line, "errno: ")
-			if errno == "2" {
-				return -1, -1, 0, "", nil, os.ErrNotExist
-			}
-
-			return -1, -1, 0, "", nil, fmt.Errorf(errStr)
-		}
-
-		// Extract the uid
-		if strings.HasPrefix(line, "uid: ") {
-			uid, err = strconv.ParseInt(strings.TrimPrefix(line, "uid: "), 10, 64)
-			if err != nil {
-				return -1, -1, 0, "", nil, err
-			}
-
-			continue
-		}
-
-		// Extract the gid
-		if strings.HasPrefix(line, "gid: ") {
-			gid, err = strconv.ParseInt(strings.TrimPrefix(line, "gid: "), 10, 64)
-			if err != nil {
-				return -1, -1, 0, "", nil, err
-			}
-
-			continue
-		}
-
-		// Extract the mode
-		if strings.HasPrefix(line, "mode: ") {
-			mode, err = strconv.Atoi(strings.TrimPrefix(line, "mode: "))
-			if err != nil {
-				return -1, -1, 0, "", nil, err
-			}
-
-			continue
-		}
-
-		if strings.HasPrefix(line, "type: ") {
-			type_ = strings.TrimPrefix(line, "type: ")
-			continue
-		}
-
-		if strings.HasPrefix(line, "entry: ") {
-			ent := strings.TrimPrefix(line, "entry: ")
-			ent = strings.Replace(ent, "\x00", "\n", -1)
-			dirEnts = append(dirEnts, ent)
-			continue
-		}
-
-		logger.Debugf("forkgetfile: %s", line)
-	}
-
-	if err != nil {
-		return -1, -1, 0, "", nil, err
-	}
-
-	// Unmap uid and gid if needed
-	if !c.IsRunning() {
-		idmapset, err := c.DiskIdmap()
-		if err != nil {
-			return -1, -1, 0, "", nil, err
-		}
-
-		if idmapset != nil {
-			uid, gid = idmapset.ShiftFromNs(uid, gid)
-		}
-	}
-
-	return uid, gid, os.FileMode(mode), type_, dirEnts, nil
-}
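
The forkfile helpers talk to the forked binary over a simple line-oriented "key: value" protocol on stderr, with "errno: 2" mapped to os.ErrNotExist. A condensed, runnable sketch of that parsing (hypothetical input, uid/gid fields only):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// parseForkfile extracts uid/gid from a forkfile-style stderr dump,
// translating errno 2 into os.ErrNotExist as the code above does.
func parseForkfile(stderr string) (uid, gid int64, err error) {
	uid, gid = -1, -1
	var errStr string

	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
		switch {
		case strings.HasPrefix(line, "error: "):
			errStr = strings.TrimPrefix(line, "error: ")
		case strings.HasPrefix(line, "errno: "):
			if strings.TrimPrefix(line, "errno: ") == "2" {
				return -1, -1, os.ErrNotExist
			}
			return -1, -1, fmt.Errorf("%s", errStr)
		case strings.HasPrefix(line, "uid: "):
			uid, _ = strconv.ParseInt(strings.TrimPrefix(line, "uid: "), 10, 64)
		case strings.HasPrefix(line, "gid: "):
			gid, _ = strconv.ParseInt(strings.TrimPrefix(line, "gid: "), 10, 64)
		}
	}
	return uid, gid, nil
}

func main() {
	uid, gid, err := parseForkfile("uid: 1000\ngid: 1000\n")
	fmt.Println(uid, gid, err) // 1000 1000 <nil>
}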
-
-func (c *containerLXC) FilePush(type_ string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error {
-	var rootUid int64
-	var rootGid int64
-	var errStr string
-
-	// Map uid and gid if needed
-	if !c.IsRunning() {
-		idmapset, err := c.DiskIdmap()
-		if err != nil {
-			return err
-		}
-
-		if idmapset != nil {
-			uid, gid = idmapset.ShiftIntoNs(uid, gid)
-			rootUid, rootGid = idmapset.ShiftIntoNs(0, 0)
-		}
-	}
-
-	var ourStart bool
-	var err error
-	// Setup container storage if needed
-	if !c.IsRunning() {
-		ourStart, err = c.StorageStart()
-		if err != nil {
-			return err
-		}
-	}
-
-	defaultMode := 0640
-	if type_ == "directory" {
-		defaultMode = 0750
-	}
-
-	// Push the file to the container
-	_, stderr, err := shared.RunCommandSplit(
-		nil,
-		c.state.OS.ExecPath,
-		"forkfile",
-		"push",
-		c.RootfsPath(),
-		fmt.Sprintf("%d", c.InitPID()),
-		srcpath,
-		dstpath,
-		type_,
-		fmt.Sprintf("%d", uid),
-		fmt.Sprintf("%d", gid),
-		fmt.Sprintf("%d", mode),
-		fmt.Sprintf("%d", rootUid),
-		fmt.Sprintf("%d", rootGid),
-		fmt.Sprintf("%d", int(os.FileMode(defaultMode)&os.ModePerm)),
-		write,
-	)
-
-	// Tear down container storage if needed
-	if !c.IsRunning() && ourStart {
-		_, err := c.StorageStop()
-		if err != nil {
-			return err
-		}
-	}
-
-	// Process the forkfile push response
-	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
-		if line == "" {
-			continue
-		}
-
-		// Extract errors
-		if strings.HasPrefix(line, "error: ") {
-			errStr = strings.TrimPrefix(line, "error: ")
-			continue
-		}
-
-		if strings.HasPrefix(line, "errno: ") {
-			errno := strings.TrimPrefix(line, "errno: ")
-			if errno == "2" {
-				return os.ErrNotExist
-			}
-
-			return fmt.Errorf(errStr)
-		}
-	}
-
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (c *containerLXC) FileRemove(path string) error {
-	var errStr string
-	var ourStart bool
-	var err error
-
-	// Setup container storage if needed
-	if !c.IsRunning() {
-		ourStart, err = c.StorageStart()
-		if err != nil {
-			return err
-		}
-	}
-
-	// Remove the file from the container
-	_, stderr, err := shared.RunCommandSplit(
-		nil,
-		c.state.OS.ExecPath,
-		"forkfile",
-		"remove",
-		c.RootfsPath(),
-		fmt.Sprintf("%d", c.InitPID()),
-		path,
-	)
-
-	// Tear down container storage if needed
-	if !c.IsRunning() && ourStart {
-		_, err := c.StorageStop()
-		if err != nil {
-			return err
-		}
-	}
-
-	// Process the forkfile remove response
-	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
-		if line == "" {
-			continue
-		}
-
-		// Extract errors
-		if strings.HasPrefix(line, "error: ") {
-			errStr = strings.TrimPrefix(line, "error: ")
-			continue
-		}
-
-		if strings.HasPrefix(line, "errno: ") {
-			errno := strings.TrimPrefix(line, "errno: ")
-			if errno == "2" {
-				return os.ErrNotExist
-			}
-
-			return fmt.Errorf(errStr)
-		}
-	}
-
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (c *containerLXC) Console(terminal *os.File) *exec.Cmd {
-	args := []string{
-		c.state.OS.ExecPath,
-		"forkconsole",
-		project.Prefix(c.Project(), c.Name()),
-		c.state.OS.LxcPath,
-		filepath.Join(c.LogPath(), "lxc.conf"),
-		"tty=0",
-		"escape=-1"}
-
-	cmd := exec.Cmd{}
-	cmd.Path = c.state.OS.ExecPath
-	cmd.Args = args
-	cmd.Stdin = terminal
-	cmd.Stdout = terminal
-	cmd.Stderr = terminal
-	return &cmd
-}
-
-func (c *containerLXC) ConsoleLog(opts lxc.ConsoleLogOptions) (string, error) {
-	msg, err := c.c.ConsoleLog(opts)
-	if err != nil {
-		return "", err
-	}
-
-	return string(msg), nil
-}
-
-func (c *containerLXC) Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, wait bool, cwd string, uid uint32, gid uint32) (*exec.Cmd, int, int, error) {
-	// Prepare the environment
-	envSlice := []string{}
-
-	for k, v := range env {
-		envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v))
-	}
-
-	// Setup logfile
-	logPath := filepath.Join(c.LogPath(), "forkexec.log")
-	logFile, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0644)
-	if err != nil {
-		return nil, -1, -1, err
-	}
-
-	// Prepare the subcommand
-	cname := project.Prefix(c.Project(), c.Name())
-	args := []string{
-		c.state.OS.ExecPath,
-		"forkexec",
-		cname,
-		c.state.OS.LxcPath,
-		filepath.Join(c.LogPath(), "lxc.conf"),
-		cwd,
-		fmt.Sprintf("%d", uid),
-		fmt.Sprintf("%d", gid),
-	}
-
-	args = append(args, "--")
-	args = append(args, "env")
-	args = append(args, envSlice...)
-
-	args = append(args, "--")
-	args = append(args, "cmd")
-	args = append(args, command...)
-
-	cmd := exec.Cmd{}
-	cmd.Path = c.state.OS.ExecPath
-	cmd.Args = args
-
-	cmd.Stdin = nil
-	cmd.Stdout = logFile
-	cmd.Stderr = logFile
-
-	// Mitigation for CVE-2019-5736
-	useRexec := false
-	if c.expandedConfig["raw.idmap"] != "" {
-		err := allowedUnprivilegedOnlyMap(c.expandedConfig["raw.idmap"])
-		if err != nil {
-			useRexec = true
-		}
-	}
-
-	if shared.IsTrue(c.expandedConfig["security.privileged"]) {
-		useRexec = true
-	}
-
-	if useRexec {
-		cmd.Env = append(os.Environ(), "LXC_MEMFD_REXEC=1")
-	}
-
-	// Setup communication PIPE
-	rStatus, wStatus, err := shared.Pipe()
-	if err != nil {
-		return nil, -1, -1, err
-	}
-	defer rStatus.Close()
-
-	cmd.ExtraFiles = []*os.File{stdin, stdout, stderr, wStatus}
-	err = cmd.Start()
-	if err != nil {
-		wStatus.Close()
-		return nil, -1, -1, err
-	}
-	wStatus.Close()
-
-	attachedPid := -1
-	if err := json.NewDecoder(rStatus).Decode(&attachedPid); err != nil {
-		logger.Errorf("Failed to retrieve PID of executing child process: %s", err)
-		return nil, -1, -1, err
-	}
-
-	// It's the caller's responsibility to decide whether to wait.
-	if !wait {
-		return &cmd, -1, attachedPid, nil
-	}
-
-	err = cmd.Wait()
-	if err != nil {
-		exitErr, ok := err.(*exec.ExitError)
-		if ok {
-			status, ok := exitErr.Sys().(syscall.WaitStatus)
-			if ok {
-				if status.Signaled() {
-					// 128 + n == Fatal error signal "n"
-					return nil, 128 + int(status.Signal()), attachedPid, nil
-				}
-
-				return nil, status.ExitStatus(), attachedPid, nil
-			}
-		}
-
-		return nil, -1, -1, err
-	}
-
-	return nil, 0, attachedPid, nil
-}
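
The tail of Exec converts a Wait error into a shell-style exit code. A standalone sketch of that conversion, checking the signal case before the plain exit status (WaitStatus.ExitStatus is -1 for signal deaths):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// exitCode maps a cmd.Wait error to a shell-style exit code, using
// 128+signal for processes killed by a fatal signal.
func exitCode(err error) int {
	if err == nil {
		return 0
	}
	if exitErr, ok := err.(*exec.ExitError); ok {
		if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
			if status.Signaled() {
				return 128 + int(status.Signal()) // fatal signal "n"
			}
			return status.ExitStatus()
		}
	}
	return -1
}

func main() {
	err := exec.Command("false").Run()
	fmt.Println(exitCode(err)) // 1
}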
-
-func (c *containerLXC) cpuState() api.InstanceStateCPU {
-	cpu := api.InstanceStateCPU{}
-
-	if !c.state.OS.CGroupCPUacctController {
-		return cpu
-	}
-
-	// CPU usage in seconds
-	value, err := c.CGroupGet("cpuacct.usage")
-	if err != nil {
-		cpu.Usage = -1
-		return cpu
-	}
-
-	valueInt, err := strconv.ParseInt(value, 10, 64)
-	if err != nil {
-		cpu.Usage = -1
-		return cpu
-	}
-
-	cpu.Usage = valueInt
-
-	return cpu
-}
-
-func (c *containerLXC) diskState() map[string]api.InstanceStateDisk {
-	disk := map[string]api.InstanceStateDisk{}
-
-	// Initialize storage interface for the container.
-	err := c.initStorage()
-	if err != nil {
-		return disk
-	}
-
-	for _, dev := range c.expandedDevices.Sorted() {
-		if dev.Config["type"] != "disk" {
-			continue
-		}
-
-		if dev.Config["path"] != "/" {
-			continue
-		}
-
-		usage, err := c.storage.ContainerGetUsage(c)
-		if err != nil {
-			continue
-		}
-
-		disk[dev.Name] = api.InstanceStateDisk{Usage: usage}
-	}
-
-	return disk
-}
-
-func (c *containerLXC) memoryState() api.InstanceStateMemory {
-	memory := api.InstanceStateMemory{}
-
-	if !c.state.OS.CGroupMemoryController {
-		return memory
-	}
-
-	// Memory in bytes
-	value, err := c.CGroupGet("memory.usage_in_bytes")
-	valueInt, err1 := strconv.ParseInt(value, 10, 64)
-	if err == nil && err1 == nil {
-		memory.Usage = valueInt
-	}
-
-	// Memory peak in bytes
-	value, err = c.CGroupGet("memory.max_usage_in_bytes")
-	valueInt, err1 = strconv.ParseInt(value, 10, 64)
-	if err == nil && err1 == nil {
-		memory.UsagePeak = valueInt
-	}
-
-	if c.state.OS.CGroupSwapAccounting {
-		// Swap in bytes
-		if memory.Usage > 0 {
-			value, err := c.CGroupGet("memory.memsw.usage_in_bytes")
-			valueInt, err1 := strconv.ParseInt(value, 10, 64)
-			if err == nil && err1 == nil {
-				memory.SwapUsage = valueInt - memory.Usage
-			}
-		}
-
-		// Swap peak in bytes
-		if memory.UsagePeak > 0 {
-			value, err = c.CGroupGet("memory.memsw.max_usage_in_bytes")
-			valueInt, err1 = strconv.ParseInt(value, 10, 64)
-			if err == nil && err1 == nil {
-				memory.SwapUsagePeak = valueInt - memory.UsagePeak
-			}
-		}
-	}
-
-	return memory
-}
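
The state functions above all reduce to reading a single integer out of a cgroup v1 controller file via CGroupGet. A generic sketch of that read (hypothetical path, assumes the host mounts cgroup v1):

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// cgroupGetInt64 reads one integer value from a cgroup controller file.
func cgroupGetInt64(path string) (int64, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return -1, err
	}
	return strconv.ParseInt(strings.TrimSpace(string(b)), 10, 64)
}

func main() {
	v, err := cgroupGetInt64("/sys/fs/cgroup/memory/lxc/c1/memory.usage_in_bytes")
	fmt.Println(v, err)
}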
-
-func (c *containerLXC) networkState() map[string]api.InstanceStateNetwork {
-	result := map[string]api.InstanceStateNetwork{}
-
-	pid := c.InitPID()
-	if pid < 1 {
-		return result
-	}
-
-	couldUseNetnsGetifaddrs := c.state.OS.NetnsGetifaddrs
-	if couldUseNetnsGetifaddrs {
-		nw, err := netutils.NetnsGetifaddrs(int32(pid))
-		if err != nil {
-			couldUseNetnsGetifaddrs = false
-			logger.Error("Failed to retrieve network information via netlink", log.Ctx{"container": c.name, "err": err, "pid": pid})
-		} else {
-			result = nw
-		}
-	}
-
-	if !couldUseNetnsGetifaddrs {
-		// Get the network state from the container
-		out, err := shared.RunCommand(
-			c.state.OS.ExecPath,
-			"forknet",
-			"info",
-			fmt.Sprintf("%d", pid))
-
-		// Process forkgetnet response
-		if err != nil {
-			logger.Error("Error calling 'lxd forknet info'", log.Ctx{"container": c.name, "err": err, "pid": pid})
-			return result
-		}
-
-		// Direct netns_getifaddrs() failed but the setns() + forknet
-		// fallback succeeded, so always use the fallback style of
-		// retrieval from now on.
-		c.state.OS.NetnsGetifaddrs = false
-
-		nw := map[string]api.InstanceStateNetwork{}
-		err = json.Unmarshal([]byte(out), &nw)
-		if err != nil {
-			logger.Error("Failure to read forkgetnet json", log.Ctx{"container": c.name, "err": err})
-			return result
-		}
-		result = nw
-	}
-
-	// Get host_name from volatile data if not set already.
-	for name, dev := range result {
-		if dev.HostName == "" {
-			dev.HostName = c.localConfig[fmt.Sprintf("volatile.%s.host_name", name)]
-			result[name] = dev
-		}
-	}
-
-	return result
-}
-
-func (c *containerLXC) processesState() int64 {
-	// Return 0 if not running
-	pid := c.InitPID()
-	if pid == -1 {
-		return 0
-	}
-
-	if c.state.OS.CGroupPidsController {
-		value, err := c.CGroupGet("pids.current")
-		if err != nil {
-			return -1
-		}
-
-		valueInt, err := strconv.ParseInt(value, 10, 64)
-		if err != nil {
-			return -1
-		}
-
-		return valueInt
-	}
-
-	pids := []int64{int64(pid)}
-
-	// Go through the pid list, adding new pids at the end so we go through them all
-	for i := 0; i < len(pids); i++ {
-		fname := fmt.Sprintf("/proc/%d/task/%d/children", pids[i], pids[i])
-		fcont, err := ioutil.ReadFile(fname)
-		if err != nil {
-			// the process terminated during execution of this loop
-			continue
-		}
-
-		content := strings.Split(string(fcont), " ")
-		for j := 0; j < len(content); j++ {
-			pid, err := strconv.ParseInt(content[j], 10, 64)
-			if err == nil {
-				pids = append(pids, pid)
-			}
-		}
-	}
-
-	return int64(len(pids))
-}
-
-// Storage functions
-func (c *containerLXC) Storage() storage {
-	if c.storage == nil {
-		c.initStorage()
-	}
-
-	return c.storage
-}
-
-func (c *containerLXC) StorageStart() (bool, error) {
-	// Initialize storage interface for the container.
-	err := c.initStorage()
-	if err != nil {
-		return false, err
-	}
-
-	isOurOperation, err := c.StorageStartSensitive()
-	// Remove this as soon as zfs is fixed
-	if c.storage.GetStorageType() == storageTypeZfs && err == unix.EBUSY {
-		return isOurOperation, nil
-	}
-
-	return isOurOperation, err
-}
-
-// Kill this function as soon as zfs is fixed.
-func (c *containerLXC) StorageStartSensitive() (bool, error) {
-	// Initialize storage interface for the container.
-	err := c.initStorage()
-	if err != nil {
-		return false, err
-	}
-
-	var isOurOperation bool
-	if c.IsSnapshot() {
-		isOurOperation, err = c.storage.ContainerSnapshotStart(c)
-	} else {
-		isOurOperation, err = c.storage.ContainerMount(c)
-	}
-
-	return isOurOperation, err
-}
-
-func (c *containerLXC) StorageStop() (bool, error) {
-	// Initialize storage interface for the container.
-	err := c.initStorage()
-	if err != nil {
-		return false, err
-	}
-
-	var isOurOperation bool
-	if c.IsSnapshot() {
-		isOurOperation, err = c.storage.ContainerSnapshotStop(c)
-	} else {
-		isOurOperation, err = c.storage.ContainerUmount(c, c.Path())
-	}
-
-	return isOurOperation, err
-}
-
-// Mount handling
-func (c *containerLXC) insertMountLXD(source, target, fstype string, flags int, mntnsPID int, shiftfs bool) error {
-	pid := mntnsPID
-	if pid <= 0 {
-		// Get the init PID
-		pid = c.InitPID()
-		if pid == -1 {
-			// Container isn't running
-			return fmt.Errorf("Can't insert mount into stopped container")
-		}
-	}
-
-	// Create the temporary mount target
-	var tmpMount string
-	var err error
-	if shared.IsDir(source) {
-		tmpMount, err = ioutil.TempDir(c.ShmountsPath(), "lxdmount_")
-		if err != nil {
-			return fmt.Errorf("Failed to create shmounts path: %s", err)
-		}
-	} else {
-		f, err := ioutil.TempFile(c.ShmountsPath(), "lxdmount_")
-		if err != nil {
-			return fmt.Errorf("Failed to create shmounts path: %s", err)
-		}
-
-		tmpMount = f.Name()
-		f.Close()
-	}
-	defer os.Remove(tmpMount)
-
-	// Mount the filesystem
-	err = unix.Mount(source, tmpMount, fstype, uintptr(flags), "")
-	if err != nil {
-		return fmt.Errorf("Failed to setup temporary mount: %s", err)
-	}
-	defer unix.Unmount(tmpMount, unix.MNT_DETACH)
-
-	// Setup host side shiftfs as needed
-	if shiftfs {
-		err = unix.Mount(tmpMount, tmpMount, "shiftfs", 0, "mark,passthrough=3")
-		if err != nil {
-			return fmt.Errorf("Failed to setup host side shiftfs mount: %s", err)
-		}
-		defer unix.Unmount(tmpMount, unix.MNT_DETACH)
-	}
-
-	// Move the mount inside the container
-	mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount))
-	pidStr := fmt.Sprintf("%d", pid)
-
-	_, err = shared.RunCommand(c.state.OS.ExecPath, "forkmount", "lxd-mount", pidStr, mntsrc, target, fmt.Sprintf("%v", shiftfs))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (c *containerLXC) insertMountLXC(source, target, fstype string, flags int) error {
-	cname := project.Prefix(c.Project(), c.Name())
-	configPath := filepath.Join(c.LogPath(), "lxc.conf")
-	if fstype == "" {
-		fstype = "none"
-	}
-
-	if !strings.HasPrefix(target, "/") {
-		target = "/" + target
-	}
-
-	_, err := shared.RunCommand(c.state.OS.ExecPath, "forkmount", "lxc-mount", cname, c.state.OS.LxcPath, configPath, source, target, fstype, fmt.Sprintf("%d", flags))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (c *containerLXC) insertMount(source, target, fstype string, flags int, shiftfs bool) error {
-	if c.state.OS.LXCFeatures["mount_injection_file"] && !shiftfs {
-		return c.insertMountLXC(source, target, fstype, flags)
-	}
-
-	return c.insertMountLXD(source, target, fstype, flags, -1, shiftfs)
-}
-
-func (c *containerLXC) removeMount(mount string) error {
-	// Get the init PID
-	pid := c.InitPID()
-	if pid == -1 {
-		// Container isn't running
-		return fmt.Errorf("Can't remove mount from stopped container")
-	}
-
-	if c.state.OS.LXCFeatures["mount_injection_file"] {
-		configPath := filepath.Join(c.LogPath(), "lxc.conf")
-		cname := project.Prefix(c.Project(), c.Name())
-
-		if !strings.HasPrefix(mount, "/") {
-			mount = "/" + mount
-		}
-
-		_, err := shared.RunCommand(c.state.OS.ExecPath, "forkmount", "lxc-umount", cname, c.state.OS.LxcPath, configPath, mount)
-		if err != nil {
-			return err
-		}
-	} else {
-		// Remove the mount from the container
-		pidStr := fmt.Sprintf("%d", pid)
-		_, err := shared.RunCommand(c.state.OS.ExecPath, "forkmount", "lxd-umount", pidStr, mount)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *containerLXC) InsertSeccompUnixDevice(prefix string, m config.Device, pid int) error {
-	if pid < 0 {
-		return fmt.Errorf("Invalid request PID specified")
-	}
-
-	rootLink := fmt.Sprintf("/proc/%d/root", pid)
-	rootPath, err := os.Readlink(rootLink)
-	if err != nil {
-		return err
-	}
-
-	err, uid, gid, _, _ := taskIds(pid)
-	if err != nil {
-		return err
-	}
-
-	idmapset, err := c.CurrentIdmap()
-	if err != nil {
-		return err
-	}
-
-	nsuid, nsgid := idmapset.ShiftFromNs(uid, gid)
-	m["uid"] = fmt.Sprintf("%d", nsuid)
-	m["gid"] = fmt.Sprintf("%d", nsgid)
-
-	if !path.IsAbs(m["path"]) {
-		cwdLink := fmt.Sprintf("/proc/%d/cwd", pid)
-		prefixPath, err := os.Readlink(cwdLink)
-		if err != nil {
-			return err
-		}
-
-		prefixPath = strings.TrimPrefix(prefixPath, rootPath)
-		m["path"] = filepath.Join(rootPath, prefixPath, m["path"])
-	} else {
-		m["path"] = filepath.Join(rootPath, m["path"])
-	}
-
-	d, err := device.UnixDeviceCreate(c.state, idmapset, c.DevicesPath(), prefix, m, true)
-	if err != nil {
-		return fmt.Errorf("Failed to setup device: %s", err)
-	}
-	devPath := d.HostPath
-	tgtPath := d.RelativePath
-
-	// Bind-mount it into the container
-	defer os.Remove(devPath)
-	return c.insertMountLXD(devPath, tgtPath, "none", unix.MS_BIND, pid, false)
-}
-
-func (c *containerLXC) removeUnixDevices() error {
-	// Check that we indeed have devices to remove
-	if !shared.PathExists(c.DevicesPath()) {
-		return nil
-	}
-
-	// Load the directory listing
-	dents, err := ioutil.ReadDir(c.DevicesPath())
-	if err != nil {
-		return err
-	}
-
-	// Go through all the unix devices
-	for _, f := range dents {
-		// Skip non-Unix devices
-		if !strings.HasPrefix(f.Name(), "forkmknod.unix.") && !strings.HasPrefix(f.Name(), "unix.") && !strings.HasPrefix(f.Name(), "infiniband.unix.") {
-			continue
-		}
-
-		// Remove the entry
-		devicePath := filepath.Join(c.DevicesPath(), f.Name())
-		err := os.Remove(devicePath)
-		if err != nil {
-			logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
-		}
-	}
-
-	return nil
-}
-
-// fillNetworkDevice takes a nic or infiniband device type and enriches it with automatically
-// generated name and hwaddr properties if these are missing from the device.
-func (c *containerLXC) fillNetworkDevice(name string, m config.Device) (config.Device, error) {
-	newDevice := m.Clone()
-
-	// Function to try and guess an available name
-	nextInterfaceName := func() (string, error) {
-		devNames := []string{}
-
-		// Include all static interface names
-		for _, dev := range c.expandedDevices.Sorted() {
-			if dev.Config["name"] != "" && !shared.StringInSlice(dev.Config["name"], devNames) {
-				devNames = append(devNames, dev.Config["name"])
-			}
-		}
-
-		// Include all currently allocated interface names
-		for k, v := range c.expandedConfig {
-			if !strings.HasPrefix(k, "volatile.") {
-				continue
-			}
-
-			fields := strings.SplitN(k, ".", 3)
-			if len(fields) != 3 {
-				continue
-			}
-
-			if fields[2] != "name" || shared.StringInSlice(v, devNames) {
-				continue
-			}
-
-			devNames = append(devNames, v)
-		}
-
-		// Attempt to include all existing interfaces
-		cname := project.Prefix(c.Project(), c.Name())
-		cc, err := lxc.NewContainer(cname, c.state.OS.LxcPath)
-		if err == nil {
-			defer cc.Release()
-
-			interfaces, err := cc.Interfaces()
-			if err == nil {
-				for _, name := range interfaces {
-					if shared.StringInSlice(name, devNames) {
-						continue
-					}
-
-					devNames = append(devNames, name)
-				}
-			}
-		}
-
-		i := 0
-		name := ""
-		for {
-			if m["type"] == "infiniband" {
-				name = fmt.Sprintf("ib%d", i)
-			} else {
-				name = fmt.Sprintf("eth%d", i)
-			}
-
-			// Find a free device name
-			if !shared.StringInSlice(name, devNames) {
-				return name, nil
-			}
-
-			i++
-		}
-	}
-
-	updateKey := func(key string, value string) error {
-		tx, err := c.state.Cluster.Begin()
-		if err != nil {
-			return err
-		}
-
-		err = db.ContainerConfigInsert(tx, c.id, map[string]string{key: value})
-		if err != nil {
-			tx.Rollback()
-			return err
-		}
-
-		err = db.TxCommit(tx)
-		if err != nil {
-			return err
-		}
-
-		return nil
-	}
-
-	// Fill in the MAC address
-	if !shared.StringInSlice(m["nictype"], []string{"physical", "ipvlan", "sriov"}) && m["hwaddr"] == "" {
-		configKey := fmt.Sprintf("volatile.%s.hwaddr", name)
-		volatileHwaddr := c.localConfig[configKey]
-		if volatileHwaddr == "" {
-			// Generate a new MAC address (assign to the outer
-			// volatileHwaddr rather than shadowing it with :=)
-			var err error
-			volatileHwaddr, err = deviceNextInterfaceHWAddr()
-			if err != nil {
-				return nil, err
-			}
-
-			// Update the database
-			err = query.Retry(func() error {
-				err := updateKey(configKey, volatileHwaddr)
-				if err != nil {
-					// Check if something else filled it in behind our back
-					value, err1 := c.state.Cluster.ContainerConfigGet(c.id, configKey)
-					if err1 != nil || value == "" {
-						return err
-					}
-
-					c.localConfig[configKey] = value
-					c.expandedConfig[configKey] = value
-					return nil
-				}
-
-				c.localConfig[configKey] = volatileHwaddr
-				c.expandedConfig[configKey] = volatileHwaddr
-				return nil
-			})
-			if err != nil {
-				return nil, err
-			}
-		}
-		newDevice["hwaddr"] = volatileHwaddr
-	}
-
-	// Fill in the name
-	if m["name"] == "" {
-		configKey := fmt.Sprintf("volatile.%s.name", name)
-		volatileName := c.localConfig[configKey]
-		if volatileName == "" {
-			// Generate a new interface name (assign to the outer
-			// volatileName rather than shadowing it with :=)
-			var err error
-			volatileName, err = nextInterfaceName()
-			if err != nil {
-				return nil, err
-			}
-
-			// Update the database
-			err = updateKey(configKey, volatileName)
-			if err != nil {
-				// Check if something else filled it in behind our back
-				value, err1 := c.state.Cluster.ContainerConfigGet(c.id, configKey)
-				if err1 != nil || value == "" {
-					return nil, err
-				}
-
-				c.localConfig[configKey] = value
-				c.expandedConfig[configKey] = value
-			} else {
-				c.localConfig[configKey] = volatileName
-				c.expandedConfig[configKey] = volatileName
-			}
-		}
-		newDevice["name"] = volatileName
-	}
-
-	return newDevice, nil
-}
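
The name-allocation closure above boils down to: collect every name already taken, then probe eth0, eth1, ... (or ib0, ib1, ... for infiniband) until one is free. A compact sketch of just that probing step:

package main

import "fmt"

// nextInterfaceName probes ethN (or ibN) until it finds a name not in
// the taken set, mirroring the closure in fillNetworkDevice.
func nextInterfaceName(taken map[string]bool, infiniband bool) string {
	prefix := "eth"
	if infiniband {
		prefix = "ib"
	}

	for i := 0; ; i++ {
		name := fmt.Sprintf("%s%d", prefix, i)
		if !taken[name] {
			return name
		}
	}
}

func main() {
	taken := map[string]bool{"eth0": true, "eth1": true}
	fmt.Println(nextInterfaceName(taken, false)) // eth2
}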
-
-func (c *containerLXC) removeDiskDevices() error {
-	// Check that we indeed have devices to remove
-	if !shared.PathExists(c.DevicesPath()) {
-		return nil
-	}
-
-	// Load the directory listing
-	dents, err := ioutil.ReadDir(c.DevicesPath())
-	if err != nil {
-		return err
-	}
-
-	// Go through all the disk devices
-	for _, f := range dents {
-		// Skip non-disk devices
-		if !strings.HasPrefix(f.Name(), "disk.") {
-			continue
-		}
-
-		// Always try to unmount the host side
-		_ = unix.Unmount(filepath.Join(c.DevicesPath(), f.Name()), unix.MNT_DETACH)
-
-		// Remove the entry
-		diskPath := filepath.Join(c.DevicesPath(), f.Name())
-		err := os.Remove(diskPath)
-		if err != nil {
-			logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
-		}
-	}
-
-	return nil
-}
-
-// Network I/O limits
-func (c *containerLXC) setNetworkPriority() error {
-	// Check that the container is running
-	if !c.IsRunning() {
-		return fmt.Errorf("Can't set network priority on stopped container")
-	}
-
-	// Don't bother if the cgroup controller doesn't exist
-	if !c.state.OS.CGroupNetPrioController {
-		return nil
-	}
-
-	// Extract the current priority
-	networkPriority := c.expandedConfig["limits.network.priority"]
-	if networkPriority == "" {
-		networkPriority = "0"
-	}
-
-	networkInt, err := strconv.Atoi(networkPriority)
-	if err != nil {
-		return err
-	}
-
-	// Get all the interfaces
-	netifs, err := net.Interfaces()
-	if err != nil {
-		return err
-	}
-
-	// Check that we at least succeeded to set an entry
-	success := false
-	var lastError error
-	for _, netif := range netifs {
-		err = c.CGroupSet("net_prio.ifpriomap", fmt.Sprintf("%s %d", netif.Name, networkInt))
-		if err == nil {
-			success = true
-		} else {
-			lastError = err
-		}
-	}
-
-	if !success {
-		return fmt.Errorf("Failed to set network device priority: %s", lastError)
-	}
-
-	return nil
-}
-
-// Various state query functions
-func (c *containerLXC) IsStateful() bool {
-	return c.stateful
-}
-
-func (c *containerLXC) IsEphemeral() bool {
-	return c.ephemeral
-}
-
-func (c *containerLXC) IsFrozen() bool {
-	return c.State() == "FROZEN"
-}
-
-func (c *containerLXC) IsNesting() bool {
-	return shared.IsTrue(c.expandedConfig["security.nesting"])
-}
-
-func (c *containerLXC) isCurrentlyPrivileged() bool {
-	if !c.IsRunning() {
-		return c.IsPrivileged()
-	}
-
-	idmap, err := c.CurrentIdmap()
-	if err != nil {
-		return c.IsPrivileged()
-	}
-
-	return idmap == nil
-}
-
-func (c *containerLXC) IsPrivileged() bool {
-	return shared.IsTrue(c.expandedConfig["security.privileged"])
-}
-
-func (c *containerLXC) IsRunning() bool {
-	state := c.State()
-	return state != "BROKEN" && state != "STOPPED"
-}
-
-func (c *containerLXC) IsSnapshot() bool {
-	return c.snapshot
-}
-
-// Various property query functions
-func (c *containerLXC) Architecture() int {
-	return c.architecture
-}
-
-func (c *containerLXC) CreationDate() time.Time {
-	return c.creationDate
-}
-
-func (c *containerLXC) LastUsedDate() time.Time {
-	return c.lastUsedDate
-}
-
-func (c *containerLXC) ExpandedConfig() map[string]string {
-	return c.expandedConfig
-}
-
-func (c *containerLXC) ExpandedDevices() config.Devices {
-	return c.expandedDevices
-}
-
-func (c *containerLXC) Id() int {
-	return c.id
-}
-
-func (c *containerLXC) InitPID() int {
-	// Load the go-lxc struct
-	err := c.initLXC(false)
-	if err != nil {
-		return -1
-	}
-
-	return c.c.InitPid()
-}
-
-func (c *containerLXC) LocalConfig() map[string]string {
-	return c.localConfig
-}
-
-func (c *containerLXC) LocalDevices() config.Devices {
-	return c.localDevices
-}
-
-func (c *containerLXC) CurrentIdmap() (*idmap.IdmapSet, error) {
-	jsonIdmap, ok := c.LocalConfig()["volatile.idmap.current"]
-	if !ok {
-		return c.DiskIdmap()
-	}
-
-	return idmapsetFromString(jsonIdmap)
-}
-
-func (c *containerLXC) DiskIdmap() (*idmap.IdmapSet, error) {
-	jsonIdmap, ok := c.LocalConfig()["volatile.last_state.idmap"]
-	if !ok {
-		return nil, nil
-	}
-
-	return idmapsetFromString(jsonIdmap)
-}
-
-func (c *containerLXC) NextIdmap() (*idmap.IdmapSet, error) {
-	jsonIdmap, ok := c.LocalConfig()["volatile.idmap.next"]
-	if !ok {
-		return c.CurrentIdmap()
-	}
-
-	return idmapsetFromString(jsonIdmap)
-}
-
-func (c *containerLXC) DaemonState() *state.State {
-	// FIXME: This function should go away, since the abstract container
-	//        interface should not be coupled with internal state details.
-	//        However this is not currently possible, because many
-	//        higher-level APIs use container variables as "implicit
-	//        handles" to database/OS state and then need a way to get a
-	//        reference to it.
-	return c.state
-}
-
-func (c *containerLXC) Location() string {
-	return c.node
-}
-
-func (c *containerLXC) Project() string {
-	return c.project
-}
-
-func (c *containerLXC) Name() string {
-	return c.name
-}
-
-func (c *containerLXC) Description() string {
-	return c.description
-}
-
-func (c *containerLXC) Profiles() []string {
-	return c.profiles
-}
-
-func (c *containerLXC) State() string {
-	state, err := c.getLxcState()
-	if err != nil {
-		return api.Error.String()
-	}
-	return state.String()
-}
-
-// Various container paths
-func (c *containerLXC) Path() string {
-	name := project.Prefix(c.Project(), c.Name())
-	return driver.ContainerPath(name, c.IsSnapshot())
-}
-
-func (c *containerLXC) DevicesPath() string {
-	name := project.Prefix(c.Project(), c.Name())
-	return shared.VarPath("devices", name)
-}
-
-func (c *containerLXC) ShmountsPath() string {
-	name := project.Prefix(c.Project(), c.Name())
-	return shared.VarPath("shmounts", name)
-}
-
-func (c *containerLXC) LogPath() string {
-	name := project.Prefix(c.Project(), c.Name())
-	return shared.LogPath(name)
-}
-
-func (c *containerLXC) LogFilePath() string {
-	return filepath.Join(c.LogPath(), "lxc.log")
-}
-
-func (c *containerLXC) ConsoleBufferLogPath() string {
-	return filepath.Join(c.LogPath(), "console.log")
-}
-
-func (c *containerLXC) RootfsPath() string {
-	return filepath.Join(c.Path(), "rootfs")
-}
-
-func (c *containerLXC) TemplatesPath() string {
-	return filepath.Join(c.Path(), "templates")
-}
-
-func (c *containerLXC) StatePath() string {
-	/* FIXME: backwards compatibility: we used to use Join(RootfsPath(),
-	 * "state"), which was bad. Let's just check to see if that directory
-	 * exists.
-	 */
-	oldStatePath := filepath.Join(c.RootfsPath(), "state")
-	if shared.IsDir(oldStatePath) {
-		return oldStatePath
-	}
-	return filepath.Join(c.Path(), "state")
-}
-
-func (c *containerLXC) StoragePool() (string, error) {
-	poolName, err := c.state.Cluster.ContainerPool(c.Project(), c.Name())
-	if err != nil {
-		return "", err
-	}
-
-	return poolName, nil
-}
-
-// Progress tracking
-func (c *containerLXC) SetOperation(op *operation) {
-	c.op = op
-}
-
-func (c *containerLXC) ExpiryDate() time.Time {
-	if c.IsSnapshot() {
-		return c.expiryDate
-	}
-
-	// Return zero time if the container is not a snapshot
-	return time.Time{}
-}
-
-func (c *containerLXC) updateProgress(progress string) {
-	if c.op == nil {
-		return
-	}
-
-	meta := c.op.metadata
-	if meta == nil {
-		meta = make(map[string]interface{})
-	}
-
-	if meta["container_progress"] != progress {
-		meta["container_progress"] = progress
-		c.op.UpdateMetadata(meta)
-	}
-}
-
-// Internal MAAS handling
-func (c *containerLXC) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
-	interfaces := []maas.ContainerInterface{}
-	for k, m := range devices {
-		if m["type"] != "nic" {
-			continue
-		}
-
-		if m["maas.subnet.ipv4"] == "" && m["maas.subnet.ipv6"] == "" {
-			continue
-		}
-
-		m, err := c.fillNetworkDevice(k, m)
-		if err != nil {
-			return nil, err
-		}
-
-		subnets := []maas.ContainerInterfaceSubnet{}
-
-		// IPv4
-		if m["maas.subnet.ipv4"] != "" {
-			subnet := maas.ContainerInterfaceSubnet{
-				Name:    m["maas.subnet.ipv4"],
-				Address: m["ipv4.address"],
-			}
-
-			subnets = append(subnets, subnet)
-		}
-
-		// IPv6
-		if m["maas.subnet.ipv6"] != "" {
-			subnet := maas.ContainerInterfaceSubnet{
-				Name:    m["maas.subnet.ipv6"],
-				Address: m["ipv6.address"],
-			}
-
-			subnets = append(subnets, subnet)
-		}
-
-		iface := maas.ContainerInterface{
-			Name:       m["name"],
-			MACAddress: m["hwaddr"],
-			Subnets:    subnets,
-		}
-
-		interfaces = append(interfaces, iface)
-	}
-
-	return interfaces, nil
-}
-
-func (c *containerLXC) maasUpdate(oldDevices map[string]map[string]string) error {
-	// Check if MAAS is configured
-	maasURL, err := cluster.ConfigGetString(c.state.Cluster, "maas.api.url")
-	if err != nil {
-		return err
-	}
-
-	if maasURL == "" {
-		return nil
-	}
-
-	// Check if there's something that uses MAAS
-	interfaces, err := c.maasInterfaces(c.expandedDevices.CloneNative())
-	if err != nil {
-		return err
-	}
-
-	var oldInterfaces []maas.ContainerInterface
-	if oldDevices != nil {
-		oldInterfaces, err = c.maasInterfaces(oldDevices)
-		if err != nil {
-			return err
-		}
-	}
-
-	if len(interfaces) == 0 && len(oldInterfaces) == 0 {
-		return nil
-	}
-
-	// See if we're connected to MAAS
-	if c.state.MAAS == nil {
-		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
-	}
-
-	exists, err := c.state.MAAS.DefinedContainer(project.Prefix(c.project, c.name))
-	if err != nil {
-		return err
-	}
-
-	if exists {
-		if len(interfaces) == 0 && len(oldInterfaces) > 0 {
-			return c.state.MAAS.DeleteContainer(project.Prefix(c.project, c.name))
-		}
-
-		return c.state.MAAS.UpdateContainer(project.Prefix(c.project, c.name), interfaces)
-	}
-
-	return c.state.MAAS.CreateContainer(project.Prefix(c.project, c.name), interfaces)
-}
-
-func (c *containerLXC) maasRename(newName string) error {
-	maasURL, err := cluster.ConfigGetString(c.state.Cluster, "maas.api.url")
-	if err != nil {
-		return err
-	}
-
-	if maasURL == "" {
-		return nil
-	}
-
-	interfaces, err := c.maasInterfaces(c.expandedDevices.CloneNative())
-	if err != nil {
-		return err
-	}
-
-	if len(interfaces) == 0 {
-		return nil
-	}
-
-	if c.state.MAAS == nil {
-		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
-	}
-
-	exists, err := c.state.MAAS.DefinedContainer(project.Prefix(c.project, c.name))
-	if err != nil {
-		return err
-	}
-
-	if !exists {
-		return c.maasUpdate(nil)
-	}
-
-	return c.state.MAAS.RenameContainer(project.Prefix(c.project, c.name), project.Prefix(c.project, newName))
-}
-
-func (c *containerLXC) maasDelete() error {
-	maasURL, err := cluster.ConfigGetString(c.state.Cluster, "maas.api.url")
-	if err != nil {
-		return err
-	}
-
-	if maasURL == "" {
-		return nil
-	}
-
-	interfaces, err := c.maasInterfaces(c.expandedDevices.CloneNative())
-	if err != nil {
-		return err
-	}
-
-	if len(interfaces) == 0 {
-		return nil
-	}
-
-	if c.state.MAAS == nil {
-		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
-	}
-
-	exists, err := c.state.MAAS.DefinedContainer(project.Prefix(c.project, c.name))
-	if err != nil {
-		return err
-	}
-
-	if !exists {
-		return nil
-	}
-
-	return c.state.MAAS.DeleteContainer(project.Prefix(c.project, c.name))
-}
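
maasUpdate, maasRename and maasDelete repeat the same three guards: MAAS configured, MAAS-managed interfaces present, controller connected. A hypothetical refactoring sketch of that guard sequence, with plain stand-ins for the LXD state:

package main

import (
	"errors"
	"fmt"
)

// maasGuard mirrors the repeated checks in the maas* methods above:
// skip silently when MAAS isn't configured or no device uses it, and
// fail only when MAAS is configured but unreachable.
func maasGuard(maasURL string, ifaceCount int, connected bool) (bool, error) {
	if maasURL == "" {
		return false, nil // MAAS not configured: nothing to do
	}
	if ifaceCount == 0 {
		return false, nil // no maas.subnet.* devices: nothing to do
	}
	if !connected {
		return false, errors.New("Can't perform the operation because MAAS is currently unavailable")
	}
	return true, nil
}

func main() {
	ok, err := maasGuard("http://maas.example.com", 2, true)
	fmt.Println(ok, err) // true <nil>
}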

From 7dff3c442224b94c3667c5b6349aef7793f5de13 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:10:36 +0100
Subject: [PATCH 16/72] lxd/container/lxc/utils: Moved to instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_lxc_utils.go | 30 ------------------------------
 1 file changed, 30 deletions(-)
 delete mode 100644 lxd/container_lxc_utils.go

diff --git a/lxd/container_lxc_utils.go b/lxd/container_lxc_utils.go
deleted file mode 100644
index 9da660f398..0000000000
--- a/lxd/container_lxc_utils.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package main
-
-import (
-	"encoding/json"
-
-	"github.com/lxc/lxd/shared/idmap"
-)
-
-func idmapsetFromString(idmapString string) (*idmap.IdmapSet, error) {
-	lastIdmap := new(idmap.IdmapSet)
-	err := json.Unmarshal([]byte(idmapString), &lastIdmap.Idmap)
-	if err != nil {
-		return nil, err
-	}
-
-	if len(lastIdmap.Idmap) == 0 {
-		return nil, nil
-	}
-
-	return lastIdmap, nil
-}
-
-func idmapsetToJSON(idmapSet *idmap.IdmapSet) (string, error) {
-	idmapBytes, err := json.Marshal(idmapSet.Idmap)
-	if err != nil {
-		return "", err
-	}
-
-	return string(idmapBytes), nil
-}
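
A round-trip usage sketch for the two helpers above, assuming they are in scope (same package); the entry values are hypothetical:

package main

import (
	"fmt"

	"github.com/lxc/lxd/shared/idmap"
)

func main() {
	// Hypothetical single-entry map: container uid 0 -> host uid 100000.
	set := &idmap.IdmapSet{Idmap: []idmap.IdmapEntry{
		{Isuid: true, Hostid: 100000, Nsid: 0, Maprange: 65536},
	}}

	s, err := idmapsetToJSON(set)
	if err != nil {
		panic(err)
	}

	parsed, err := idmapsetFromString(s)
	if err != nil {
		panic(err)
	}

	fmt.Println(s, len(parsed.Idmap)) // serialized JSON and entry count 1
}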

From a6aedb6e2ea79c89e32107e17e96983edc800d32 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:10:53 +0100
Subject: [PATCH 17/72] lxd/apparmor: Moved to instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/apparmor.go | 788 ------------------------------------------------
 1 file changed, 788 deletions(-)
 delete mode 100644 lxd/apparmor.go

diff --git a/lxd/apparmor.go b/lxd/apparmor.go
deleted file mode 100644
index 46037ff32f..0000000000
--- a/lxd/apparmor.go
+++ /dev/null
@@ -1,788 +0,0 @@
-package main
-
-import (
-	"crypto/sha256"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-	"strings"
-
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/logger"
-
-	log "github.com/lxc/lxd/shared/log15"
-)
-
-const (
-	APPARMOR_CMD_LOAD   = "r"
-	APPARMOR_CMD_UNLOAD = "R"
-	APPARMOR_CMD_PARSE  = "Q"
-)
-
-var aaPath = shared.VarPath("security", "apparmor")
-
-const AA_PROFILE_BASE = `
-  ### Base profile
-  capability,
-  dbus,
-  file,
-  network,
-  umount,
-
-  # Hide common denials
-  deny mount options=(ro, remount) -> /,
-  deny mount options=(ro, remount, silent) -> /,
-
-  # Allow normal signal handling
-  signal (receive),
-  signal peer=@{profile_name},
-
-  # Allow normal process handling
-  ptrace (readby),
-  ptrace (tracedby),
-  ptrace peer=@{profile_name},
-
-  # Handle binfmt
-  mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/,
-  deny /proc/sys/fs/binfmt_misc/{,**} rwklx,
-
-  # Handle cgroupfs
-  mount options=(ro, nosuid, nodev, noexec, remount, strictatime) -> /sys/fs/cgroup/,
-
-  # Handle debugfs
-  mount fstype=debugfs -> /sys/kernel/debug/,
-  deny /sys/kernel/debug/{,**} rwklx,
-
-  # Handle efivarfs
-  mount fstype=efivarfs -> /sys/firmware/efi/efivars/,
-  deny /sys/firmware/efi/efivars/{,**} rwklx,
-
-  # Handle fuse
-  mount fstype=fuse,
-  mount fstype=fuse.*,
-  mount fstype=fusectl -> /sys/fs/fuse/connections/,
-
-  # Handle hugetlbfs
-  mount fstype=hugetlbfs,
-
-  # Handle mqueue
-  mount fstype=mqueue,
-
-  # Handle proc
-  mount fstype=proc -> /proc/,
-  deny /proc/bus/** wklx,
-  deny /proc/kcore rwklx,
-  deny /proc/sysrq-trigger rwklx,
-  deny /proc/acpi/** rwklx,
-  deny /proc/sys/fs/** wklx,
-
-  # Handle securityfs (access handled separately)
-  mount fstype=securityfs -> /sys/kernel/security/,
-
-  # Handle sysfs (access handled below)
-  mount fstype=sysfs -> /sys/,
-  mount options=(rw, nosuid, nodev, noexec, remount) -> /sys/,
-
-  # Handle tmpfs
-  mount fstype=tmpfs,
-
-  # Allow limited modification of mount propagation
-  mount options=(rw,slave) -> /,
-  mount options=(rw,rslave) -> /,
-  mount options=(rw,shared) -> /,
-  mount options=(rw,rshared) -> /,
-  mount options=(rw,private) -> /,
-  mount options=(rw,rprivate) -> /,
-  mount options=(rw,unbindable) -> /,
-  mount options=(rw,runbindable) -> /,
-
-  # Allow various ro-bind-*re*-mounts
-  mount options=(ro,remount,bind) /[^spd]*{,/**},
-  mount options=(ro,remount,bind) /d[^e]*{,/**},
-  mount options=(ro,remount,bind) /de[^v]*{,/**},
-  mount options=(ro,remount,bind) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind) /dev?*{,/**},
-  mount options=(ro,remount,bind) /p[^r]*{,/**},
-  mount options=(ro,remount,bind) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind) /proc?*{,/**},
-  mount options=(ro,remount,bind) /s[^y]*{,/**},
-  mount options=(ro,remount,bind) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind) /sys?*{,/**},
-
-  mount options=(ro,remount,bind,nodev) /[^spd]*{,/**},
-  mount options=(ro,remount,bind,nodev) /d[^e]*{,/**},
-  mount options=(ro,remount,bind,nodev) /de[^v]*{,/**},
-  mount options=(ro,remount,bind,nodev) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind,nodev) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind,nodev) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind,nodev) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind,nodev) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind,nodev) /dev?*{,/**},
-  mount options=(ro,remount,bind,nodev) /p[^r]*{,/**},
-  mount options=(ro,remount,bind,nodev) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind,nodev) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind,nodev) /proc?*{,/**},
-  mount options=(ro,remount,bind,nodev) /s[^y]*{,/**},
-  mount options=(ro,remount,bind,nodev) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind,nodev) /sys?*{,/**},
-
-  mount options=(ro,remount,bind,nodev,nosuid) /[^spd]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /d[^e]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /de[^v]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /dev?*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /p[^r]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /proc?*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /s[^y]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind,nodev,nosuid) /sys?*{,/**},
-
-  mount options=(ro,remount,bind,noexec) /[^spd]*{,/**},
-  mount options=(ro,remount,bind,noexec) /d[^e]*{,/**},
-  mount options=(ro,remount,bind,noexec) /de[^v]*{,/**},
-  mount options=(ro,remount,bind,noexec) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind,noexec) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind,noexec) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind,noexec) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind,noexec) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind,noexec) /dev?*{,/**},
-  mount options=(ro,remount,bind,noexec) /p[^r]*{,/**},
-  mount options=(ro,remount,bind,noexec) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind,noexec) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind,noexec) /proc?*{,/**},
-  mount options=(ro,remount,bind,noexec) /s[^y]*{,/**},
-  mount options=(ro,remount,bind,noexec) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind,noexec) /sys?*{,/**},
-
-  mount options=(ro,remount,bind,noexec,nodev) /[^spd]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /d[^e]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /de[^v]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /dev?*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /p[^r]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /proc?*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /s[^y]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind,noexec,nodev) /sys?*{,/**},
-
-  mount options=(ro,remount,bind,nosuid) /[^spd]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /d[^e]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /de[^v]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind,nosuid) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /dev?*{,/**},
-  mount options=(ro,remount,bind,nosuid) /p[^r]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /proc?*{,/**},
-  mount options=(ro,remount,bind,nosuid) /s[^y]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind,nosuid) /sys?*{,/**},
-
-  mount options=(ro,remount,bind,nosuid,nodev) /[^spd]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /d[^e]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /de[^v]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /dev?*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /p[^r]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /proc?*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /s[^y]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind,nosuid,nodev) /sys?*{,/**},
-
-  mount options=(ro,remount,bind,nosuid,noexec) /[^spd]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /d[^e]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /de[^v]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /dev?*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /p[^r]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /proc?*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /s[^y]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec) /sys?*{,/**},
-
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /[^spd]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /d[^e]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /de[^v]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /dev?*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /p[^r]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /proc?*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /s[^y]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,nodev) /sys?*{,/**},
-
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /[^spd]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /d[^e]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /de[^v]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /dev?*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /p[^r]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /proc?*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /s[^y]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime) /sys?*{,/**},
-
-  # Allow bind-mounts of anything except /proc, /sys and /dev/.lxc
-  mount options=(rw,bind) /[^spd]*{,/**},
-  mount options=(rw,bind) /d[^e]*{,/**},
-  mount options=(rw,bind) /de[^v]*{,/**},
-  mount options=(rw,bind) /dev/.[^l]*{,/**},
-  mount options=(rw,bind) /dev/.l[^x]*{,/**},
-  mount options=(rw,bind) /dev/.lx[^c]*{,/**},
-  mount options=(rw,bind) /dev/.lxc?*{,/**},
-  mount options=(rw,bind) /dev/[^.]*{,/**},
-  mount options=(rw,bind) /dev?*{,/**},
-  mount options=(rw,bind) /p[^r]*{,/**},
-  mount options=(rw,bind) /pr[^o]*{,/**},
-  mount options=(rw,bind) /pro[^c]*{,/**},
-  mount options=(rw,bind) /proc?*{,/**},
-  mount options=(rw,bind) /s[^y]*{,/**},
-  mount options=(rw,bind) /sy[^s]*{,/**},
-  mount options=(rw,bind) /sys?*{,/**},
-
-  # Allow rbind-mounts of anything except /, /dev, /proc and /sys
-  mount options=(rw,rbind) /[^spd]*{,/**},
-  mount options=(rw,rbind) /d[^e]*{,/**},
-  mount options=(rw,rbind) /de[^v]*{,/**},
-  mount options=(rw,rbind) /dev?*{,/**},
-  mount options=(rw,rbind) /p[^r]*{,/**},
-  mount options=(rw,rbind) /pr[^o]*{,/**},
-  mount options=(rw,rbind) /pro[^c]*{,/**},
-  mount options=(rw,rbind) /proc?*{,/**},
-  mount options=(rw,rbind) /s[^y]*{,/**},
-  mount options=(rw,rbind) /sy[^s]*{,/**},
-  mount options=(rw,rbind) /sys?*{,/**},
-
-  # Allow read-only bind-mounts of anything except /proc, /sys and /dev/.lxc
-  mount options=(ro,remount,bind) /[^spd]*{,/**},
-  mount options=(ro,remount,bind) /d[^e]*{,/**},
-  mount options=(ro,remount,bind) /de[^v]*{,/**},
-  mount options=(ro,remount,bind) /dev/.[^l]*{,/**},
-  mount options=(ro,remount,bind) /dev/.l[^x]*{,/**},
-  mount options=(ro,remount,bind) /dev/.lx[^c]*{,/**},
-  mount options=(ro,remount,bind) /dev/.lxc?*{,/**},
-  mount options=(ro,remount,bind) /dev/[^.]*{,/**},
-  mount options=(ro,remount,bind) /dev?*{,/**},
-  mount options=(ro,remount,bind) /p[^r]*{,/**},
-  mount options=(ro,remount,bind) /pr[^o]*{,/**},
-  mount options=(ro,remount,bind) /pro[^c]*{,/**},
-  mount options=(ro,remount,bind) /proc?*{,/**},
-  mount options=(ro,remount,bind) /s[^y]*{,/**},
-  mount options=(ro,remount,bind) /sy[^s]*{,/**},
-  mount options=(ro,remount,bind) /sys?*{,/**},
-
-  # Allow moving mounts except for /proc, /sys and /dev/.lxc
-  mount options=(rw,move) /[^spd]*{,/**},
-  mount options=(rw,move) /d[^e]*{,/**},
-  mount options=(rw,move) /de[^v]*{,/**},
-  mount options=(rw,move) /dev/.[^l]*{,/**},
-  mount options=(rw,move) /dev/.l[^x]*{,/**},
-  mount options=(rw,move) /dev/.lx[^c]*{,/**},
-  mount options=(rw,move) /dev/.lxc?*{,/**},
-  mount options=(rw,move) /dev/[^.]*{,/**},
-  mount options=(rw,move) /dev?*{,/**},
-  mount options=(rw,move) /p[^r]*{,/**},
-  mount options=(rw,move) /pr[^o]*{,/**},
-  mount options=(rw,move) /pro[^c]*{,/**},
-  mount options=(rw,move) /proc?*{,/**},
-  mount options=(rw,move) /s[^y]*{,/**},
-  mount options=(rw,move) /sy[^s]*{,/**},
-  mount options=(rw,move) /sys?*{,/**},
-
-  # Block dangerous paths under /proc/sys
-  deny /proc/sys/[^kn]*{,/**} wklx,
-  deny /proc/sys/k[^e]*{,/**} wklx,
-  deny /proc/sys/ke[^r]*{,/**} wklx,
-  deny /proc/sys/ker[^n]*{,/**} wklx,
-  deny /proc/sys/kern[^e]*{,/**} wklx,
-  deny /proc/sys/kerne[^l]*{,/**} wklx,
-  deny /proc/sys/kernel/[^smhd]*{,/**} wklx,
-  deny /proc/sys/kernel/d[^o]*{,/**} wklx,
-  deny /proc/sys/kernel/do[^m]*{,/**} wklx,
-  deny /proc/sys/kernel/dom[^a]*{,/**} wklx,
-  deny /proc/sys/kernel/doma[^i]*{,/**} wklx,
-  deny /proc/sys/kernel/domai[^n]*{,/**} wklx,
-  deny /proc/sys/kernel/domain[^n]*{,/**} wklx,
-  deny /proc/sys/kernel/domainn[^a]*{,/**} wklx,
-  deny /proc/sys/kernel/domainna[^m]*{,/**} wklx,
-  deny /proc/sys/kernel/domainnam[^e]*{,/**} wklx,
-  deny /proc/sys/kernel/domainname?*{,/**} wklx,
-  deny /proc/sys/kernel/h[^o]*{,/**} wklx,
-  deny /proc/sys/kernel/ho[^s]*{,/**} wklx,
-  deny /proc/sys/kernel/hos[^t]*{,/**} wklx,
-  deny /proc/sys/kernel/host[^n]*{,/**} wklx,
-  deny /proc/sys/kernel/hostn[^a]*{,/**} wklx,
-  deny /proc/sys/kernel/hostna[^m]*{,/**} wklx,
-  deny /proc/sys/kernel/hostnam[^e]*{,/**} wklx,
-  deny /proc/sys/kernel/hostname?*{,/**} wklx,
-  deny /proc/sys/kernel/m[^s]*{,/**} wklx,
-  deny /proc/sys/kernel/ms[^g]*{,/**} wklx,
-  deny /proc/sys/kernel/msg*/** wklx,
-  deny /proc/sys/kernel/s[^he]*{,/**} wklx,
-  deny /proc/sys/kernel/se[^m]*{,/**} wklx,
-  deny /proc/sys/kernel/sem*/** wklx,
-  deny /proc/sys/kernel/sh[^m]*{,/**} wklx,
-  deny /proc/sys/kernel/shm*/** wklx,
-  deny /proc/sys/kernel?*{,/**} wklx,
-  deny /proc/sys/n[^e]*{,/**} wklx,
-  deny /proc/sys/ne[^t]*{,/**} wklx,
-  deny /proc/sys/net?*{,/**} wklx,
-
-  # Block dangerous paths under /sys
-  deny /sys/[^fdck]*{,/**} wklx,
-  deny /sys/c[^l]*{,/**} wklx,
-  deny /sys/cl[^a]*{,/**} wklx,
-  deny /sys/cla[^s]*{,/**} wklx,
-  deny /sys/clas[^s]*{,/**} wklx,
-  deny /sys/class/[^n]*{,/**} wklx,
-  deny /sys/class/n[^e]*{,/**} wklx,
-  deny /sys/class/ne[^t]*{,/**} wklx,
-  deny /sys/class/net?*{,/**} wklx,
-  deny /sys/class?*{,/**} wklx,
-  deny /sys/d[^e]*{,/**} wklx,
-  deny /sys/de[^v]*{,/**} wklx,
-  deny /sys/dev[^i]*{,/**} wklx,
-  deny /sys/devi[^c]*{,/**} wklx,
-  deny /sys/devic[^e]*{,/**} wklx,
-  deny /sys/device[^s]*{,/**} wklx,
-  deny /sys/devices/[^v]*{,/**} wklx,
-  deny /sys/devices/v[^i]*{,/**} wklx,
-  deny /sys/devices/vi[^r]*{,/**} wklx,
-  deny /sys/devices/vir[^t]*{,/**} wklx,
-  deny /sys/devices/virt[^u]*{,/**} wklx,
-  deny /sys/devices/virtu[^a]*{,/**} wklx,
-  deny /sys/devices/virtua[^l]*{,/**} wklx,
-  deny /sys/devices/virtual/[^n]*{,/**} wklx,
-  deny /sys/devices/virtual/n[^e]*{,/**} wklx,
-  deny /sys/devices/virtual/ne[^t]*{,/**} wklx,
-  deny /sys/devices/virtual/net?*{,/**} wklx,
-  deny /sys/devices/virtual?*{,/**} wklx,
-  deny /sys/devices?*{,/**} wklx,
-  deny /sys/f[^s]*{,/**} wklx,
-  deny /sys/fs/[^c]*{,/**} wklx,
-  deny /sys/fs/c[^g]*{,/**} wklx,
-  deny /sys/fs/cg[^r]*{,/**} wklx,
-  deny /sys/fs/cgr[^o]*{,/**} wklx,
-  deny /sys/fs/cgro[^u]*{,/**} wklx,
-  deny /sys/fs/cgrou[^p]*{,/**} wklx,
-  deny /sys/fs/cgroup?*{,/**} wklx,
-  deny /sys/fs?*{,/**} wklx,
-`
-
-const AA_PROFILE_NESTING = `
-  pivot_root,
-
-  # Allow sending signals and tracing children namespaces
-  ptrace,
-  signal,
-
-  # Prevent access to hidden proc/sys mounts
-  deny /dev/.lxc/proc/** rw,
-  deny /dev/.lxc/sys/** rw,
-
-  # Allow mounting proc and sysfs in the container
-  mount fstype=proc -> /usr/lib/*/lxc/**,
-  mount fstype=sysfs -> /usr/lib/*/lxc/**,
-
-  # Allow nested LXD
-  mount none -> /var/lib/lxd/shmounts/,
-  mount /var/lib/lxd/shmounts/ -> /var/lib/lxd/shmounts/,
-  mount options=bind /var/lib/lxd/shmounts/** -> /var/lib/lxd/**,
-
-  # FIXME: There doesn't seem to be a way to ask for:
-  # mount options=(ro,nosuid,nodev,noexec,remount,bind),
-  # as we always get mount to $cdir/proc/sys with those flags denied
-  # So allow all mounts until that is straightened out:
-  mount,
-`
-
-const AA_PROFILE_UNPRIVILEGED = `
-  pivot_root,
-
-  # Allow modifying mount propagation
-  mount options=(rw,slave) -> **,
-  mount options=(rw,rslave) -> **,
-  mount options=(rw,shared) -> **,
-  mount options=(rw,rshared) -> **,
-  mount options=(rw,private) -> **,
-  mount options=(rw,rprivate) -> **,
-  mount options=(rw,unbindable) -> **,
-  mount options=(rw,runbindable) -> **,
-
-  # Allow all bind-mounts
-  mount options=(rw,bind) / -> /**,
-  mount options=(rw,bind) /** -> /**,
-  mount options=(rw,rbind) / -> /**,
-  mount options=(rw,rbind) /** -> /**,
-
-  # Allow common combinations of bind/remount
-  # NOTE: AppArmor bug effectively turns those into wildcards mount allow
-  mount options=(ro,remount,bind),
-  mount options=(ro,remount,bind,nodev),
-  mount options=(ro,remount,bind,nodev,nosuid),
-  mount options=(ro,remount,bind,noexec),
-  mount options=(ro,remount,bind,noexec,nodev),
-  mount options=(ro,remount,bind,nosuid),
-  mount options=(ro,remount,bind,nosuid,nodev),
-  mount options=(ro,remount,bind,nosuid,noexec),
-  mount options=(ro,remount,bind,nosuid,noexec,nodev),
-  mount options=(ro,remount,bind,nosuid,noexec,strictatime),
-
-  # Allow remounting things read-only
-  mount options=(ro,remount) /,
-  mount options=(ro,remount) /**,
-`
-
-func mkApparmorName(name string) string {
-	if len(name)+7 >= 253 {
-		hash := sha256.New()
-		io.WriteString(hash, name)
-		return fmt.Sprintf("%x", hash.Sum(nil))
-	}
-
-	return name
-}
-
-func AANamespace(c container) string {
-	/* / is not allowed in apparmor namespace names; let's also trim the
-	 * leading / so it doesn't look like "-var-lib-lxd"
-	 */
-	lxddir := strings.Replace(strings.Trim(shared.VarPath(""), "/"), "/", "-", -1)
-	lxddir = mkApparmorName(lxddir)
-	name := project.Prefix(c.Project(), c.Name())
-	return fmt.Sprintf("lxd-%s_<%s>", name, lxddir)
-}
-
-func AAProfileFull(c container) string {
-	lxddir := shared.VarPath("")
-	lxddir = mkApparmorName(lxddir)
-	name := project.Prefix(c.Project(), c.Name())
-	return fmt.Sprintf("lxd-%s_<%s>", name, lxddir)
-}
-
-func AAProfileShort(c container) string {
-	name := project.Prefix(c.Project(), c.Name())
-	return fmt.Sprintf("lxd-%s", name)
-}
-
-// getProfileContent generates the apparmor profile template from the given
-// container. This includes the stock lxc includes as well as stuff from
-// raw.apparmor.
-func getAAProfileContent(c container) string {
-	profile := strings.TrimLeft(AA_PROFILE_BASE, "\n")
-
-	// Apply new features
-	if aaParserSupports("unix") {
-		profile += `
-  ### Feature: unix
-  # Allow receive via unix sockets from anywhere
-  unix (receive),
-
-  # Allow all unix in the container
-  unix peer=(label=@{profile_name}),
-`
-	}
-
-	// Apply cgns bits
-	if shared.PathExists("/proc/self/ns/cgroup") {
-		profile += "\n  ### Feature: cgroup namespace\n"
-		profile += "  mount fstype=cgroup -> /sys/fs/cgroup/**,\n"
-		profile += "  mount fstype=cgroup2 -> /sys/fs/cgroup/**,\n"
-	}
-
-	state := c.DaemonState()
-	if state.OS.AppArmorStacking && !state.OS.AppArmorStacked {
-		profile += "\n  ### Feature: apparmor stacking\n"
-		profile += `  ### Configuration: apparmor profile loading (in namespace)
-  deny /sys/k[^e]*{,/**} wklx,
-  deny /sys/ke[^r]*{,/**} wklx,
-  deny /sys/ker[^n]*{,/**} wklx,
-  deny /sys/kern[^e]*{,/**} wklx,
-  deny /sys/kerne[^l]*{,/**} wklx,
-  deny /sys/kernel/[^s]*{,/**} wklx,
-  deny /sys/kernel/s[^e]*{,/**} wklx,
-  deny /sys/kernel/se[^c]*{,/**} wklx,
-  deny /sys/kernel/sec[^u]*{,/**} wklx,
-  deny /sys/kernel/secu[^r]*{,/**} wklx,
-  deny /sys/kernel/secur[^i]*{,/**} wklx,
-  deny /sys/kernel/securi[^t]*{,/**} wklx,
-  deny /sys/kernel/securit[^y]*{,/**} wklx,
-  deny /sys/kernel/security/[^a]*{,/**} wklx,
-  deny /sys/kernel/security/a[^p]*{,/**} wklx,
-  deny /sys/kernel/security/ap[^p]*{,/**} wklx,
-  deny /sys/kernel/security/app[^a]*{,/**} wklx,
-  deny /sys/kernel/security/appa[^r]*{,/**} wklx,
-  deny /sys/kernel/security/appar[^m]*{,/**} wklx,
-  deny /sys/kernel/security/apparm[^o]*{,/**} wklx,
-  deny /sys/kernel/security/apparmo[^r]*{,/**} wklx,
-  deny /sys/kernel/security/apparmor?*{,/**} wklx,
-  deny /sys/kernel/security?*{,/**} wklx,
-  deny /sys/kernel?*{,/**} wklx,
-`
-		profile += fmt.Sprintf("  change_profile -> \":%s:*\",\n", AANamespace(c))
-		profile += fmt.Sprintf("  change_profile -> \":%s://*\",\n", AANamespace(c))
-	} else {
-		profile += "\n  ### Feature: apparmor stacking (not present)\n"
-		profile += "  deny /sys/k*{,/**} wklx,\n"
-	}
-
-	if c.IsNesting() {
-		// Apply nesting bits
-		profile += "\n  ### Configuration: nesting\n"
-		profile += strings.TrimLeft(AA_PROFILE_NESTING, "\n")
-		if !state.OS.AppArmorStacking || state.OS.AppArmorStacked {
-			profile += fmt.Sprintf("  change_profile -> \"%s\",\n", AAProfileFull(c))
-		}
-	}
-
-	if !c.IsPrivileged() || state.OS.RunningInUserNS {
-		// Apply unprivileged bits
-		profile += "\n  ### Configuration: unprivileged containers\n"
-		profile += strings.TrimLeft(AA_PROFILE_UNPRIVILEGED, "\n")
-	}
-
-	// Append raw.apparmor
-	rawApparmor, ok := c.ExpandedConfig()["raw.apparmor"]
-	if ok {
-		profile += "\n  ### Configuration: raw.apparmor\n"
-		for _, line := range strings.Split(strings.Trim(rawApparmor, "\n"), "\n") {
-			profile += fmt.Sprintf("  %s\n", line)
-		}
-	}
-
-	return fmt.Sprintf(`#include <tunables/global>
-profile "%s" flags=(attach_disconnected,mediate_deleted) {
-%s
-}
-`, AAProfileFull(c), strings.Trim(profile, "\n"))
-}
-
-func runApparmor(command string, c container) error {
-	state := c.DaemonState()
-	if !state.OS.AppArmorAvailable {
-		return nil
-	}
-
-	output, err := shared.RunCommand("apparmor_parser", []string{
-		fmt.Sprintf("-%sWL", command),
-		path.Join(aaPath, "cache"),
-		path.Join(aaPath, "profiles", AAProfileShort(c)),
-	}...)
-
-	if err != nil {
-		logger.Error("Running apparmor",
-			log.Ctx{"action": command, "output": output, "err": err})
-	}
-
-	return err
-}
-
-func getAACacheDir() string {
-	basePath := path.Join(aaPath, "cache")
-
-	major, minor, _, err := getAAParserVersion()
-	if err != nil {
-		return basePath
-	}
-
-	// multiple policy cache directories were only added in v2.13
-	if major < 2 || (major == 2 && minor < 13) {
-		return basePath
-	}
-
-	output, err := shared.RunCommand("apparmor_parser", "-L", basePath, "--print-cache-dir")
-	if err != nil {
-		return basePath
-	}
-
-	return strings.TrimSpace(output)
-}
-
-func mkApparmorNamespace(c container, namespace string) error {
-	state := c.DaemonState()
-	if !state.OS.AppArmorStacking || state.OS.AppArmorStacked {
-		return nil
-	}
-
-	p := path.Join("/sys/kernel/security/apparmor/policy/namespaces", namespace)
-	if err := os.Mkdir(p, 0755); !os.IsExist(err) {
-		return err
-	}
-
-	return nil
-}
-
-// Ensure that the container's policy is loaded into the kernel so the
-// container can boot.
-func AALoadProfile(c container) error {
-	state := c.DaemonState()
-	if !state.OS.AppArmorAdmin {
-		return nil
-	}
-
-	if err := mkApparmorNamespace(c, AANamespace(c)); err != nil {
-		return err
-	}
-
-	/* In order to avoid forcing a profile parse (potentially slow) on
-	 * every container start, let's use apparmor's binary policy cache,
-	 * which checks mtime of the files to figure out if the policy needs to
-	 * be regenerated.
-	 *
-	 * Since it uses mtimes, we shouldn't just always write out our local
-	 * apparmor template; instead we should check to see whether the
-	 * template is the same as ours. If it isn't we should write our
-	 * version out so that the new changes are reflected and we definitely
-	 * force a recompile.
-	 */
-	profile := path.Join(aaPath, "profiles", AAProfileShort(c))
-	content, err := ioutil.ReadFile(profile)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	updated := getAAProfileContent(c)
-
-	if string(content) != string(updated) {
-		if err := os.MkdirAll(path.Join(aaPath, "cache"), 0700); err != nil {
-			return err
-		}
-
-		if err := os.MkdirAll(path.Join(aaPath, "profiles"), 0700); err != nil {
-			return err
-		}
-
-		if err := ioutil.WriteFile(profile, []byte(updated), 0600); err != nil {
-			return err
-		}
-	}
-
-	return runApparmor(APPARMOR_CMD_LOAD, c)
-}
-
-// Ensure that the container's policy namespace is unloaded to free kernel
-// memory. This does not delete the policy from disk or cache.
-func AADestroy(c container) error {
-	state := c.DaemonState()
-	if !state.OS.AppArmorAdmin {
-		return nil
-	}
-
-	if state.OS.AppArmorStacking && !state.OS.AppArmorStacked {
-		p := path.Join("/sys/kernel/security/apparmor/policy/namespaces", AANamespace(c))
-		if err := os.Remove(p); err != nil {
-			logger.Error("Error removing apparmor namespace", log.Ctx{"err": err, "ns": p})
-		}
-	}
-
-	return runApparmor(APPARMOR_CMD_UNLOAD, c)
-}
-
-// Parse the profile without loading it into the kernel.
-func AAParseProfile(c container) error {
-	state := c.DaemonState()
-	if !state.OS.AppArmorAvailable {
-		return nil
-	}
-
-	return runApparmor(APPARMOR_CMD_PARSE, c)
-}
-
-// Delete the policy from cache/disk.
-func AADeleteProfile(c container) {
-	state := c.DaemonState()
-	if !state.OS.AppArmorAdmin {
-		return
-	}
-
-	/* It's ok if these deletes fail: if the container was never started,
-	 * we'll have never written a profile or cached it.
-	 */
-	os.Remove(path.Join(getAACacheDir(), AAProfileShort(c)))
-	os.Remove(path.Join(aaPath, "profiles", AAProfileShort(c)))
-}
-
-func aaParserSupports(feature string) bool {
-	major, minor, micro, err := getAAParserVersion()
-	if err != nil {
-		return false
-	}
-
-	switch feature {
-	case "unix":
-		if major < 2 {
-			return false
-		}
-
-		if major == 2 && minor < 10 {
-			return false
-		}
-
-		if major == 2 && minor == 10 && micro < 95 {
-			return false
-		}
-	}
-
-	return true
-}
-
-func getAAParserVersion() (major int, minor int, micro int, err error) {
-	var out string
-
-	out, err = shared.RunCommand("apparmor_parser", "--version")
-	if err != nil {
-		return
-	}
-
-	_, err = fmt.Sscanf(strings.Split(out, "\n")[0], "AppArmor parser version %d.%d.%d", &major, &minor, &micro)
-
-	return
-}
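
One detail worth noting in the moved file: AppArmor profile names have a length limit, so mkApparmorName falls back to a SHA-256 hex digest whenever the name plus the fixed "lxd-..." decoration would exceed 253 characters. A standalone sketch of that fallback:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

// mkName mirrors mkApparmorName: keep the name as-is unless the
// decorated profile string would exceed the 253-character limit,
// in which case substitute a SHA-256 hex digest of the name.
func mkName(name string) string {
	if len(name)+7 >= 253 {
		hash := sha256.New()
		io.WriteString(hash, name)
		return fmt.Sprintf("%x", hash.Sum(nil))
	}

	return name
}

func main() {
	fmt.Println(mkName("c1"))                     // short names pass through
	fmt.Println(mkName(strings.Repeat("x", 300))) // long names get hashed
}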

From f8d8fb7b841dd040a375f63043055fbd3ec2b542 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:11:29 +0100
Subject: [PATCH 18/72] lxd/storage/zfs/utils: Removes zfsIdmapSetSkipper

Moved to storage package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_zfs_utils.go | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/lxd/storage_zfs_utils.go b/lxd/storage_zfs_utils.go
index 4a2d87091f..24b24ed8c0 100644
--- a/lxd/storage_zfs_utils.go
+++ b/lxd/storage_zfs_utils.go
@@ -818,16 +818,3 @@ func (s *storageZfs) doContainerCreate(projectName, name string, privileged bool
 	logger.Debugf("Created empty ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 	return nil
 }
-
-func zfsIdmapSetSkipper(dir string, absPath string, fi os.FileInfo) bool {
-	strippedPath := absPath
-	if dir != "" {
-		strippedPath = absPath[len(dir):]
-	}
-
-	if fi.IsDir() && strippedPath == "/.zfs/snapshot" {
-		return true
-	}
-
-	return false
-}
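
The removed skipper tells the idmap-shifting walk to leave the ZFS ".zfs/snapshot" control directory alone, since that tree is synthesised by ZFS and cannot be chowned. A small sketch of its behaviour, reusing the same path-stripping logic (any real directory's os.FileInfo serves for the demo):

package main

import (
	"fmt"
	"os"
)

// skip mirrors zfsIdmapSetSkipper: strip the dataset root prefix,
// then skip the directory if it is the ZFS snapshot control dir.
func skip(dir string, absPath string, fi os.FileInfo) bool {
	strippedPath := absPath
	if dir != "" {
		strippedPath = absPath[len(dir):]
	}

	return fi.IsDir() && strippedPath == "/.zfs/snapshot"
}

func main() {
	fi, err := os.Stat(".") // any directory FileInfo will do here
	if err != nil {
		panic(err)
	}

	fmt.Println(skip("/pool/c1", "/pool/c1/.zfs/snapshot", fi)) // true
	fmt.Println(skip("/pool/c1", "/pool/c1/rootfs", fi))        // false
}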

From 7c5602cc5ed445ed8ceec36ac8c3845005aafa11 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:12:52 +0100
Subject: [PATCH 19/72] lxd/storage/zfs: Updates uses of moved types in
 instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_zfs.go | 99 +++++++++++++++++++++++-----------------------
 1 file changed, 50 insertions(+), 49 deletions(-)

diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index aff449d39a..8ff735a8ed 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -11,10 +11,13 @@ import (
 	"strings"
 
 	"github.com/gorilla/websocket"
+	"github.com/pborman/uuid"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/project"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/lxd/util"
@@ -23,8 +26,6 @@ import (
 	"github.com/lxc/lxd/shared/ioprogress"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/units"
-
-	"github.com/pborman/uuid"
 )
 
 // Global defaults
@@ -49,8 +50,8 @@ func (s *storageZfs) getOnDiskPoolName() string {
 
 // Only initialize the minimal information we need about a given storage type.
 func (s *storageZfs) StorageCoreInit() error {
-	s.sType = storageTypeZfs
-	typeName, err := storageTypeToString(s.sType)
+	s.sType = instance.StorageTypeZfs
+	typeName, err := instance.StorageTypeToString(s.sType)
 	if err != nil {
 		return err
 	}
@@ -760,11 +761,11 @@ func (s *storageZfs) StoragePoolVolumeRename(newName string) error {
 }
 
 // Things we don't need to care about
-func (s *storageZfs) ContainerMount(c Instance) (bool, error) {
+func (s *storageZfs) ContainerMount(c instance.Instance) (bool, error) {
 	return s.doContainerMount(c.Project(), c.Name(), c.IsPrivileged())
 }
 
-func (s *storageZfs) ContainerUmount(c Instance, path string) (bool, error) {
+func (s *storageZfs) ContainerUmount(c instance.Instance, path string) (bool, error) {
 	logger.Debugf("Unmounting ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 	name := c.Name()
 
@@ -809,13 +810,13 @@ func (s *storageZfs) ContainerUmount(c Instance, path string) (bool, error) {
 }
 
 // Things we do have to care about
-func (s *storageZfs) ContainerStorageReady(container Instance) bool {
+func (s *storageZfs) ContainerStorageReady(container instance.Instance) bool {
 	volumeName := project.Prefix(container.Project(), container.Name())
 	fs := fmt.Sprintf("containers/%s", volumeName)
 	return zfsFilesystemEntityExists(s.getOnDiskPoolName(), fs)
 }
 
-func (s *storageZfs) ContainerCreate(container Instance) error {
+func (s *storageZfs) ContainerCreate(container instance.Instance) error {
 	err := s.doContainerCreate(container.Project(), container.Name(), container.IsPrivileged())
 	if err != nil {
 		s.doContainerDelete(container.Project(), container.Name())
@@ -838,7 +839,7 @@ func (s *storageZfs) ContainerCreate(container Instance) error {
 	return nil
 }
 
-func (s *storageZfs) ContainerCreateFromImage(container Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
+func (s *storageZfs) ContainerCreateFromImage(container instance.Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
 	logger.Debugf("Creating ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	containerPath := container.Path()
@@ -916,7 +917,7 @@ func (s *storageZfs) ContainerCreateFromImage(container Instance, fingerprint st
 	return nil
 }
 
-func (s *storageZfs) ContainerDelete(container Instance) error {
+func (s *storageZfs) ContainerDelete(container instance.Instance) error {
 	err := s.doContainerDelete(container.Project(), container.Name())
 	if err != nil {
 		return err
@@ -925,7 +926,7 @@ func (s *storageZfs) ContainerDelete(container Instance) error {
 	return nil
 }
 
-func (s *storageZfs) copyWithoutSnapshotsSparse(target Instance, source Instance) error {
+func (s *storageZfs) copyWithoutSnapshotsSparse(target instance.Instance, source instance.Instance) error {
 	poolName := s.getOnDiskPoolName()
 
 	sourceContainerName := source.Name()
@@ -1027,7 +1028,7 @@ func (s *storageZfs) copyWithoutSnapshotsSparse(target Instance, source Instance
 	return nil
 }
 
-func (s *storageZfs) copyWithoutSnapshotFull(target Instance, source Instance) error {
+func (s *storageZfs) copyWithoutSnapshotFull(target instance.Instance, source instance.Instance) error {
 	logger.Debugf("Creating full ZFS copy \"%s\" to \"%s\"", source.Name(), target.Name())
 
 	sourceIsSnapshot := source.IsSnapshot()
@@ -1128,7 +1129,7 @@ func (s *storageZfs) copyWithoutSnapshotFull(target Instance, source Instance) e
 	return nil
 }
 
-func (s *storageZfs) copyWithSnapshots(target Instance, source Instance, parentSnapshot string) error {
+func (s *storageZfs) copyWithSnapshots(target instance.Instance, source instance.Instance, parentSnapshot string) error {
 	sourceName := source.Name()
 	targetParentName, targetSnapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(target.Name())
 	containersPath := driver.GetSnapshotMountPoint(target.Project(), s.pool.Name, targetParentName)
@@ -1175,7 +1176,7 @@ func (s *storageZfs) copyWithSnapshots(target Instance, source Instance, parentS
 	return nil
 }
 
-func (s *storageZfs) doCrossPoolContainerCopy(target Instance, source Instance, containerOnly bool, refresh bool, refreshSnapshots []Instance) error {
+func (s *storageZfs) doCrossPoolContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
 	sourcePool, err := source.StoragePool()
 	if err != nil {
 		return err
@@ -1200,7 +1201,7 @@ func (s *storageZfs) doCrossPoolContainerCopy(target Instance, source Instance,
 		return err
 	}
 
-	var snapshots []Instance
+	var snapshots []instance.Instance
 
 	if refresh {
 		snapshots = refreshSnapshots
@@ -1253,7 +1254,7 @@ func (s *storageZfs) doCrossPoolContainerCopy(target Instance, source Instance,
 	return nil
 }
 
-func (s *storageZfs) ContainerCopy(target Instance, source Instance, containerOnly bool) error {
+func (s *storageZfs) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
 	logger.Debugf("Copying ZFS container storage %s to %s", source.Name(), target.Name())
 
 	ourStart, err := source.StorageStart()
@@ -1303,7 +1304,7 @@ func (s *storageZfs) ContainerCopy(target Instance, source Instance, containerOn
 				prev = snapshots[i-1].Name()
 			}
 
-			sourceSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), snap.Name())
+			sourceSnapshot, err := instance.InstanceLoadByProjectAndName(s.s, source.Project(), snap.Name())
 			if err != nil {
 				return err
 			}
@@ -1311,7 +1312,7 @@ func (s *storageZfs) ContainerCopy(target Instance, source Instance, containerOn
 			_, snapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(snap.Name())
 			prevSnapOnlyName = snapOnlyName
 			newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
-			targetSnapshot, err := instanceLoadByProjectAndName(s.s, target.Project(), newSnapName)
+			targetSnapshot, err := instance.InstanceLoadByProjectAndName(s.s, target.Project(), newSnapName)
 			if err != nil {
 				return err
 			}
@@ -1380,7 +1381,7 @@ func (s *storageZfs) ContainerCopy(target Instance, source Instance, containerOn
 	return nil
 }
 
-func (s *storageZfs) ContainerRefresh(target Instance, source Instance, snapshots []Instance) error {
+func (s *storageZfs) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
 	logger.Debugf("Refreshing ZFS container storage for %s from %s", target.Name(), source.Name())
 
 	ourStart, err := source.StorageStart()
@@ -1394,7 +1395,7 @@ func (s *storageZfs) ContainerRefresh(target Instance, source Instance, snapshot
 	return s.doCrossPoolContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
 }
 
-func (s *storageZfs) ContainerRename(container Instance, newName string) error {
+func (s *storageZfs) ContainerRename(container instance.Instance, newName string) error {
 	logger.Debugf("Renaming ZFS storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
 
 	poolName := s.getOnDiskPoolName()
@@ -1429,7 +1430,7 @@ func (s *storageZfs) ContainerRename(container Instance, newName string) error {
 	}
 
 	// Unmount the dataset.
-	container.(*containerLXC).name = newName
+	container.(*instance.ContainerLXC).SetName(newName)
 	_, err = s.ContainerUmount(container, "")
 	if err != nil {
 		return err
@@ -1478,7 +1479,7 @@ func (s *storageZfs) ContainerRename(container Instance, newName string) error {
 	return nil
 }
 
-func (s *storageZfs) ContainerRestore(target Instance, source Instance) error {
+func (s *storageZfs) ContainerRestore(target instance.Instance, source instance.Instance) error {
 	logger.Debugf("Restoring ZFS storage volume for container \"%s\" from %s to %s", s.volume.Name, source.Name(), target.Name())
 
 	snaps, err := target.Snapshots()
@@ -1542,7 +1543,7 @@ func (s *storageZfs) ContainerRestore(target Instance, source Instance) error {
 	return nil
 }
 
-func (s *storageZfs) ContainerGetUsage(container Instance) (int64, error) {
+func (s *storageZfs) ContainerGetUsage(container instance.Instance) (int64, error) {
 	var err error
 
 	fs := fmt.Sprintf("containers/%s", project.Prefix(container.Project(), container.Name()))
@@ -1621,7 +1622,7 @@ func (s *storageZfs) doContainerSnapshotCreate(projectName, targetName string, s
 	return nil
 }
 
-func (s *storageZfs) ContainerSnapshotCreate(snapshotContainer Instance, sourceContainer Instance) error {
+func (s *storageZfs) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
 	err := s.doContainerSnapshotCreate(sourceContainer.Project(), snapshotContainer.Name(), sourceContainer.Name())
 	if err != nil {
 		s.ContainerSnapshotDelete(snapshotContainer)
@@ -1715,7 +1716,7 @@ func zfsSnapshotDeleteInternal(projectName, poolName string, ctName string, onDi
 	return nil
 }
 
-func (s *storageZfs) ContainerSnapshotDelete(snapshotContainer Instance) error {
+func (s *storageZfs) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
 	logger.Debugf("Deleting ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	poolName := s.getOnDiskPoolName()
@@ -1729,7 +1730,7 @@ func (s *storageZfs) ContainerSnapshotDelete(snapshotContainer Instance) error {
 	return nil
 }
 
-func (s *storageZfs) ContainerSnapshotRename(snapshotContainer Instance, newName string) error {
+func (s *storageZfs) ContainerSnapshotRename(snapshotContainer instance.Instance, newName string) error {
 	logger.Debugf("Renaming ZFS storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
 
 	oldName := snapshotContainer.Name()
@@ -1794,7 +1795,7 @@ func (s *storageZfs) ContainerSnapshotRename(snapshotContainer Instance, newName
 	return nil
 }
 
-func (s *storageZfs) ContainerSnapshotStart(container Instance) (bool, error) {
+func (s *storageZfs) ContainerSnapshotStart(container instance.Instance) (bool, error) {
 	logger.Debugf("Initializing ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	cName, sName, _ := shared.ContainerGetParentAndSnapshotName(container.Name())
@@ -1818,7 +1819,7 @@ func (s *storageZfs) ContainerSnapshotStart(container Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageZfs) ContainerSnapshotStop(container Instance) (bool, error) {
+func (s *storageZfs) ContainerSnapshotStop(container instance.Instance) (bool, error) {
 	logger.Debugf("Stopping ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	cName, sName, _ := shared.ContainerGetParentAndSnapshotName(container.Name())
@@ -1833,12 +1834,12 @@ func (s *storageZfs) ContainerSnapshotStop(container Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageZfs) ContainerSnapshotCreateEmpty(snapshotContainer Instance) error {
+func (s *storageZfs) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
 	/* don't touch the fs yet, as migration will do that for us */
 	return nil
 }
 
-func (s *storageZfs) doContainerOnlyBackup(tmpPath string, backup backup, source Instance) error {
+func (s *storageZfs) doContainerOnlyBackup(tmpPath string, backup instance.Backup, source instance.Instance) error {
 	sourceIsSnapshot := source.IsSnapshot()
 	poolName := s.getOnDiskPoolName()
 
@@ -1886,7 +1887,7 @@ func (s *storageZfs) doContainerOnlyBackup(tmpPath string, backup backup, source
 	return nil
 }
 
-func (s *storageZfs) doSnapshotBackup(tmpPath string, backup backup, source Instance, parentSnapshot string) error {
+func (s *storageZfs) doSnapshotBackup(tmpPath string, backup instance.Backup, source instance.Instance, parentSnapshot string) error {
 	sourceName := source.Name()
 	snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
 
@@ -1918,14 +1919,14 @@ func (s *storageZfs) doSnapshotBackup(tmpPath string, backup backup, source Inst
 	return zfsSendCmd.Run()
 }
 
-func (s *storageZfs) doContainerBackupCreateOptimized(tmpPath string, backup backup, source Instance) error {
+func (s *storageZfs) doContainerBackupCreateOptimized(tmpPath string, backup instance.Backup, source instance.Instance) error {
 	// Handle snapshots
 	snapshots, err := source.Snapshots()
 	if err != nil {
 		return err
 	}
 
-	if backup.instanceOnly || len(snapshots) == 0 {
+	if backup.InstanceOnly || len(snapshots) == 0 {
 		err = s.doContainerOnlyBackup(tmpPath, backup, source)
 	} else {
 		prev := ""
@@ -1935,7 +1936,7 @@ func (s *storageZfs) doContainerBackupCreateOptimized(tmpPath string, backup bac
 				prev = snapshots[i-1].Name()
 			}
 
-			sourceSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), snap.Name())
+			sourceSnapshot, err := instance.InstanceLoadByProjectAndName(s.s, source.Project(), snap.Name())
 			if err != nil {
 				return err
 			}
@@ -1987,7 +1988,7 @@ func (s *storageZfs) doContainerBackupCreateOptimized(tmpPath string, backup bac
 	return nil
 }
 
-func (s *storageZfs) doContainerBackupCreateVanilla(tmpPath string, backup backup, source Instance) error {
+func (s *storageZfs) doContainerBackupCreateVanilla(tmpPath string, backup instance.Backup, source instance.Instance) error {
 	// Prepare for rsync
 	rsync := func(oldPath string, newPath string, bwlimit string) error {
 		output, err := rsyncLocalCopy(oldPath, newPath, bwlimit, true)
@@ -1999,10 +2000,10 @@ func (s *storageZfs) doContainerBackupCreateVanilla(tmpPath string, backup backu
 	}
 
 	bwlimit := s.pool.Config["rsync.bwlimit"]
-	projectName := backup.instance.Project()
+	projectName := backup.Instance.Project()
 
 	// Handle snapshots
-	if !backup.instanceOnly {
+	if !backup.InstanceOnly {
 		snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
 
 		// Retrieve the snapshots
@@ -2090,7 +2091,7 @@ func (s *storageZfs) doContainerBackupCreateVanilla(tmpPath string, backup backu
 	return nil
 }
 
-func (s *storageZfs) ContainerBackupCreate(backup backup, source Instance) error {
+func (s *storageZfs) ContainerBackupCreate(backup instance.Backup, source instance.Instance) error {
 	// Start storage
 	ourStart, err := source.StorageStart()
 	if err != nil {
@@ -2108,7 +2109,7 @@ func (s *storageZfs) ContainerBackupCreate(backup backup, source Instance) error
 	defer os.RemoveAll(tmpPath)
 
 	// Generate the actual backup
-	if backup.optimizedStorage {
+	if backup.OptimizedStorage {
 		err = s.doContainerBackupCreateOptimized(tmpPath, backup, source)
 		if err != nil {
 			return errors.Wrap(err, "Optimized backup")
@@ -2129,7 +2130,7 @@ func (s *storageZfs) ContainerBackupCreate(backup backup, source Instance) error
 	return nil
 }
 
-func (s *storageZfs) doContainerBackupLoadOptimized(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageZfs) doContainerBackupLoadOptimized(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	containerName, _, _ := shared.ContainerGetParentAndSnapshotName(info.Name)
 	containerMntPoint := driver.GetContainerMountPoint(info.Project, s.pool.Name, containerName)
 	err := driver.CreateContainerMountpoint(containerMntPoint, driver.ContainerPath(info.Name, false), info.Privileged)
@@ -2239,7 +2240,7 @@ func (s *storageZfs) doContainerBackupLoadOptimized(info backupInfo, data io.Rea
 	return nil
 }
 
-func (s *storageZfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageZfs) doContainerBackupLoadVanilla(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	// create the main container
 	err := s.doContainerCreate(info.Project, info.Name, info.Privileged)
 	if err != nil {
@@ -2301,7 +2302,7 @@ func (s *storageZfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadS
 	return nil
 }
 
-func (s *storageZfs) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageZfs) ContainerBackupLoad(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	logger.Debugf("Loading ZFS storage volume for backup \"%s\" on storage pool \"%s\"", info.Name, s.pool.Name)
 
 	if info.HasBinaryFormat {
@@ -2413,7 +2414,7 @@ func (s *storageZfs) ImageCreate(fingerprint string, tracker *ioprogress.Progres
 	}
 
 	// Unpack the image into the temporary mountpoint.
-	err = unpackImage(imagePath, tmpImageDir, storageTypeZfs, s.s.OS.RunningInUserNS, nil)
+	err = unpackImage(imagePath, tmpImageDir, instance.StorageTypeZfs, s.s.OS.RunningInUserNS, nil)
 	if err != nil {
 		return err
 	}
@@ -2514,7 +2515,7 @@ func (s *storageZfs) PreservesInodes() bool {
 	return true
 }
 
-func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageZfs) MigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	/* If the container is a snapshot, let's just send that; we don't need
 	* to send anything else, because that's all the user asked for.
 	 */
@@ -2524,7 +2525,7 @@ func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorage
 
 	driver := zfsMigrationSourceDriver{
 		instance:         args.Instance,
-		snapshots:        []Instance{},
+		snapshots:        []instance.Instance{},
 		zfsSnapshotNames: []string{},
 		zfs:              s,
 		zfsFeatures:      args.ZfsFeatures,
@@ -2554,7 +2555,7 @@ func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorage
 		}
 
 		lxdName := fmt.Sprintf("%s%s%s", args.Instance.Name(), shared.SnapshotDelimiter, snap[len("snapshot-"):])
-		snapshot, err := instanceLoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
+		snapshot, err := instance.InstanceLoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
 		if err != nil {
 			return nil, err
 		}
@@ -2566,7 +2567,7 @@ func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorage
 	return &driver, nil
 }
 
-func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	poolName := s.getOnDiskPoolName()
 	zfsName := fmt.Sprintf("containers/%s", project.Prefix(args.Instance.Project(), args.Instance.Name()))
 	zfsRecv := func(zfsName string, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
@@ -3200,11 +3201,11 @@ func (s *storageZfs) StoragePoolVolumeCopy(source *api.StorageVolumeSource) erro
 	return nil
 }
 
-func (s *storageZfs) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageZfs) StorageMigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncStorageMigrationSource(args)
 }
 
-func (s *storageZfs) StorageMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageZfs) StorageMigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return rsyncStorageMigrationSink(conn, op, args)
 }
 

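One hunk in this patch shows why the move forces API changes: container.(*containerLXC).name = newName worked while both sides lived in package main, but instance.ContainerLXC's name field is unexported, so callers outside lxd/instance must now go through an exported setter. A toy sketch of the accessor shape (the real type carries far more state):

package main

import "fmt"

// ContainerLXC sketches the moved type: its name field is
// unexported, so code in another package must use the exported
// setter, as the ContainerRename hunk above now does.
type ContainerLXC struct {
	name string
}

func (c *ContainerLXC) SetName(name string) {
	c.name = name
}

func (c *ContainerLXC) Name() string {
	return c.name
}

func main() {
	c := &ContainerLXC{name: "old"}
	c.SetName("new") // c.name = "new" would not compile cross-package
	fmt.Println(c.Name())
}
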
From e481940ef7fc686db57d2e799d11de8b77d3f90f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:13:28 +0100
Subject: [PATCH 20/72] lxd/storage/volumes/utils: Updates uses of moved types
 in instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_volumes_utils.go | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index b25a1288d9..faf7fc9708 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -6,6 +6,7 @@ import (
 	"strings"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -265,7 +266,7 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
 
 	s := d.State()
 	// update all instances
-	insts, err := instanceLoadAll(s)
+	insts, err := instance.InstanceLoadAll(s)
 	if err != nil {
 		return err
 	}
@@ -397,7 +398,7 @@ func storagePoolVolumeUpdateUsers(d *Daemon, oldPoolName string,
 func storagePoolVolumeUsedByRunningContainersWithProfilesGet(s *state.State,
 	poolName string, volumeName string, volumeTypeName string,
 	runningOnly bool) ([]string, error) {
-	insts, err := instanceLoadAll(s)
+	insts, err := instance.InstanceLoadAll(s)
 	if err != nil {
 		return []string{}, err
 	}
@@ -569,7 +570,7 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 	return nil
 }
 
-func storagePoolVolumeDBCreateInternal(state *state.State, poolName string, vol *api.StorageVolumesPost) (storage, error) {
+func storagePoolVolumeDBCreateInternal(state *state.State, poolName string, vol *api.StorageVolumesPost) (instance.Storage, error) {
 	volumeName := vol.Name
 	volumeDescription := vol.Description
 	volumeTypeName := vol.Type
@@ -665,7 +666,7 @@ func storagePoolVolumeCreateInternal(state *state.State, poolName string, vol *a
 	return nil
 }
 
-func storagePoolVolumeSnapshotCopyInternal(state *state.State, poolName string, vol *api.StorageVolumesPost, snapshotName string) (storage, error) {
+func storagePoolVolumeSnapshotCopyInternal(state *state.State, poolName string, vol *api.StorageVolumesPost, snapshotName string) (instance.Storage, error) {
 	volumeType, err := storagePoolVolumeTypeNameToType(vol.Type)
 	if err != nil {
 		return nil, err
@@ -700,7 +701,7 @@ func storagePoolVolumeSnapshotCopyInternal(state *state.State, poolName string,
 	return storagePoolVolumeSnapshotDBCreateInternal(state, dbArgs)
 }
 
-func storagePoolVolumeSnapshotDBCreateInternal(state *state.State, dbArgs *db.StorageVolumeArgs) (storage, error) {
+func storagePoolVolumeSnapshotDBCreateInternal(state *state.State, dbArgs *db.StorageVolumeArgs) (instance.Storage, error) {
 	// Create database entry for new storage volume.
 	err := storagePoolVolumeDBCreate(state, dbArgs.PoolName, dbArgs.Name, dbArgs.Description, dbArgs.TypeName, true, dbArgs.Config)
 	if err != nil {

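The next patch below applies the same treatment to the volume snapshot handlers: task closures change signature from func(*operation) error to func(*operation.Operation) error and are registered through the exported operation.OperationCreate. A compressed sketch of that task-operation shape, using hypothetical local names rather than the real lxd/operation API:

package main

import "fmt"

// Operation is a hypothetical stand-in for operation.Operation.
type Operation struct {
	description string
}

// operationCreate sketches the registration shape: it records a
// task closure and hands back the operation handle. A real
// implementation would queue the task; this sketch runs it inline.
func operationCreate(description string, task func(*Operation) error) (*Operation, error) {
	op := &Operation{description: description}
	if err := task(op); err != nil {
		return nil, err
	}
	return op, nil
}

func main() {
	snapshot := func(op *Operation) error {
		fmt.Println("running:", op.description)
		return nil
	}

	if _, err := operationCreate("volume snapshot create", snapshot); err != nil {
		panic(err)
	}
}
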
From d20f70ebff959363917c858398749960eb09312f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:14:03 +0100
Subject: [PATCH 21/72] lxd/storage/volumes/snapshot: Updates uses of moved
 types in new packages

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_volumes_snapshot.go | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index 7a5ff87ee7..f8ca08599a 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -8,7 +8,9 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/operation"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -32,7 +34,7 @@ var storagePoolVolumeSnapshotTypeCmd = APIEndpoint{
 	Put:    APIEndpointAction{Handler: storagePoolVolumeSnapshotTypePut},
 }
 
-func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) daemon.Response {
 	// Get the name of the pool.
 	poolName := mux.Vars(r)["pool"]
 
@@ -127,7 +129,7 @@ func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) Response {
 	volWritable := storage.GetStoragePoolVolumeWritable()
 	fullSnapName := fmt.Sprintf("%s%s%s", volumeName, shared.SnapshotDelimiter, req.Name)
 	req.Name = fullSnapName
-	snapshot := func(op *operation) error {
+	snapshot := func(op *operation.Operation) error {
 		dbArgs := &db.StorageVolumeArgs{
 			Name:        fullSnapName,
 			PoolName:    poolName,
@@ -152,7 +154,7 @@ func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["storage_volumes"] = []string{volumeName}
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationVolumeSnapshotCreate, resources, nil, snapshot, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationVolumeSnapshotCreate, resources, nil, snapshot, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -160,7 +162,7 @@ func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func storagePoolVolumeSnapshotsTypeGet(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeSnapshotsTypeGet(d *Daemon, r *http.Request) daemon.Response {
 	// Get the name of the pool the storage volume is supposed to be
 	// attached to.
 	poolName := mux.Vars(r)["pool"]
@@ -235,7 +237,7 @@ func storagePoolVolumeSnapshotsTypeGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, resultMap)
 }
 
-func storagePoolVolumeSnapshotTypePost(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeSnapshotTypePost(d *Daemon, r *http.Request) daemon.Response {
 	// Get the name of the storage pool the volume is supposed to be
 	// attached to.
 	poolName := mux.Vars(r)["pool"]
@@ -298,7 +300,7 @@ func storagePoolVolumeSnapshotTypePost(d *Daemon, r *http.Request) Response {
 		return NotFound(err)
 	}
 
-	snapshotRename := func(op *operation) error {
+	snapshotRename := func(op *operation.Operation) error {
 		err = s.StoragePoolVolumeSnapshotRename(req.Name)
 		if err != nil {
 			return err
@@ -310,7 +312,7 @@ func storagePoolVolumeSnapshotTypePost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["storage_volume_snapshots"] = []string{volumeName}
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationVolumeSnapshotDelete, resources, nil, snapshotRename, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationVolumeSnapshotDelete, resources, nil, snapshotRename, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -318,7 +320,7 @@ func storagePoolVolumeSnapshotTypePost(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func storagePoolVolumeSnapshotTypeGet(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeSnapshotTypeGet(d *Daemon, r *http.Request) daemon.Response {
 	// Get the name of the storage pool the volume is supposed to be
 	// attached to.
 	poolName := mux.Vars(r)["pool"]
@@ -374,7 +376,7 @@ func storagePoolVolumeSnapshotTypeGet(d *Daemon, r *http.Request) Response {
 	return SyncResponseETag(true, &snapshot, etag)
 }
 
-func storagePoolVolumeSnapshotTypePut(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeSnapshotTypePut(d *Daemon, r *http.Request) daemon.Response {
 	// Get the name of the storage pool the volume is supposed to be
 	// attached to.
 	poolName := mux.Vars(r)["pool"]
@@ -433,9 +435,9 @@ func storagePoolVolumeSnapshotTypePut(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
-	var do func(*operation) error
+	var do func(*operation.Operation) error
 	var opDescription db.OperationType
-	do = func(op *operation) error {
+	do = func(op *operation.Operation) error {
 		err = storagePoolVolumeSnapshotUpdate(d.State(), poolName, volume.Name, volumeType, req.Description)
 		if err != nil {
 			return err
@@ -448,7 +450,7 @@ func storagePoolVolumeSnapshotTypePut(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["storage_volume_snapshots"] = []string{volumeName}
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, opDescription, resources, nil, do, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, opDescription, resources, nil, do, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -456,7 +458,7 @@ func storagePoolVolumeSnapshotTypePut(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func storagePoolVolumeSnapshotTypeDelete(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeSnapshotTypeDelete(d *Daemon, r *http.Request) daemon.Response {
 	// Get the name of the storage pool the volume is supposed to be
 	// attached to.
 	poolName := mux.Vars(r)["pool"]
@@ -502,7 +504,7 @@ func storagePoolVolumeSnapshotTypeDelete(d *Daemon, r *http.Request) Response {
 		return NotFound(err)
 	}
 
-	snapshotDelete := func(op *operation) error {
+	snapshotDelete := func(op *operation.Operation) error {
 		err = s.StoragePoolVolumeSnapshotDelete()
 		if err != nil {
 			return err
@@ -514,7 +516,7 @@ func storagePoolVolumeSnapshotTypeDelete(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["storage_volume_snapshots"] = []string{volumeName}
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationVolumeSnapshotDelete, resources, nil, snapshotDelete, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationVolumeSnapshotDelete, resources, nil, snapshotDelete, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}

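The mechanical change in this patch is the same at every call site: task callbacks now take the exported *operation.Operation type, and operations are registered through operation.OperationCreate instead of the old main-package operationCreate helper. A minimal sketch of the resulting call shape, condensed from the call sites above (doSnapshot stands in for the real work function):

	run := func(op *operation.Operation) error {
		// Do the actual work of the background task here.
		return doSnapshot()
	}

	resources := map[string][]string{}
	resources["storage_volumes"] = []string{volumeName}

	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationVolumeSnapshotCreate, resources, nil, run, nil, nil)
	if err != nil {
		return InternalError(err)
	}

	return OperationResponse(op)
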
From a2200a47a76425a9937519e61c1e71ae51232624 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:14:35 +0100
Subject: [PATCH 22/72] lxd/storage/volumes/config: Links
 storageVolumeFillDefault to instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_volumes_config.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/lxd/storage_volumes_config.go b/lxd/storage_volumes_config.go
index 76dca2b558..ea21399f9c 100644
--- a/lxd/storage_volumes_config.go
+++ b/lxd/storage_volumes_config.go
@@ -4,11 +4,16 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/units"
 )
 
+func init() {
+	instance.StorageVolumeFillDefault = storageVolumeFillDefault
+}
+
 func storageVolumePropertiesTranslate(targetConfig map[string]string, targetParentPoolDriver string) (map[string]string, error) {
 	newConfig := make(map[string]string, len(targetConfig))
 	for key, val := range targetConfig {

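Registering the function in init() is how the series breaks what would otherwise be an import cycle: lxd/instance cannot import the main package, so main injects the implementation into an exported function variable at startup. A sketch of the counterpart declaration assumed on the instance side (the exact signature in lxd/instance is not shown in this patch; the parameter list below is illustrative):

	package instance

	// StorageVolumeFillDefault is set by the main package at init time so
	// that instance code can fill in default storage volume config without
	// importing main.
	var StorageVolumeFillDefault func(name string, config map[string]string, parentPool *api.StoragePool) error
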
From b3ce65cf04c2d5635b04ae2b2bbcd0e9d19fdd43 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:15:03 +0100
Subject: [PATCH 23/72] lxd/storage/volumes: Updates use of moved types in new
 packages

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_volumes.go | 80 ++++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 39 deletions(-)

diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index e9baf04748..1c3e0d565c 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -11,14 +11,16 @@ import (
 
 	"github.com/gorilla/mux"
 	"github.com/gorilla/websocket"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
+	log "github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/version"
-
-	log "github.com/lxc/lxd/shared/log15"
 )
 
 var storagePoolVolumesCmd = APIEndpoint{
@@ -67,7 +69,7 @@ var storagePoolVolumeTypeImageCmd = APIEndpoint{
 
 // /1.0/storage-pools/{name}/volumes
 // List all storage volumes attached to a given storage pool.
-func storagePoolVolumesGet(d *Daemon, r *http.Request) Response {
+func storagePoolVolumesGet(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	poolName := mux.Vars(r)["name"]
 
@@ -151,7 +153,7 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) Response {
 
 // /1.0/storage-pools/{name}/volumes/{type}
 // List all storage volumes of a given volume type for a given storage pool.
-func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
+func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 
 	// Get the name of the pool the storage volume is supposed to be
@@ -228,7 +230,7 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
 
 // /1.0/storage-pools/{name}/volumes/{type}
 // Create a storage volume in a given storage pool.
-func storagePoolVolumesTypePost(d *Daemon, r *http.Request) Response {
+func storagePoolVolumesTypePost(d *Daemon, r *http.Request) daemon.Response {
 	response := ForwardedResponseIfTargetIsRemote(d, r)
 	if response != nil {
 		return response
@@ -275,7 +277,7 @@ func storagePoolVolumesTypePost(d *Daemon, r *http.Request) Response {
 	}
 }
 
-func doVolumeCreateOrCopy(d *Daemon, poolName string, req *api.StorageVolumesPost) Response {
+func doVolumeCreateOrCopy(d *Daemon, poolName string, req *api.StorageVolumesPost) daemon.Response {
 	doWork := func() error {
 		return storagePoolVolumeCreateInternal(d.State(), poolName, req)
 	}
@@ -289,11 +291,11 @@ func doVolumeCreateOrCopy(d *Daemon, poolName string, req *api.StorageVolumesPos
 		return EmptySyncResponse
 	}
 
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		return doWork()
 	}
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationVolumeCopy, nil, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationVolumeCopy, nil, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -304,7 +306,7 @@ func doVolumeCreateOrCopy(d *Daemon, poolName string, req *api.StorageVolumesPos
 
 // /1.0/storage-pools/{name}/volumes/{type}
 // Create a storage volume of a given volume type in a given storage pool.
-func storagePoolVolumesPost(d *Daemon, r *http.Request) Response {
+func storagePoolVolumesPost(d *Daemon, r *http.Request) daemon.Response {
 	response := ForwardedResponseIfTargetIsRemote(d, r)
 	if response != nil {
 		return response
@@ -355,7 +357,7 @@ func storagePoolVolumesPost(d *Daemon, r *http.Request) Response {
 	}
 }
 
-func doVolumeMigration(d *Daemon, poolName string, req *api.StorageVolumesPost) Response {
+func doVolumeMigration(d *Daemon, poolName string, req *api.StorageVolumesPost) daemon.Response {
 	// Validate migration mode
 	if req.Source.Mode != "pull" && req.Source.Mode != "push" {
 		return NotImplemented(fmt.Errorf("Mode '%s' not implemented", req.Source.Mode))
@@ -390,7 +392,7 @@ func doVolumeMigration(d *Daemon, poolName string, req *api.StorageVolumesPost)
 		push = true
 	}
 
-	migrationArgs := MigrationSinkArgs{
+	migrationArgs := instance.MigrationSinkArgs{
 		Url: req.Source.Operation,
 		Dialer: websocket.Dialer{
 			TLSClientConfig: config,
@@ -409,7 +411,7 @@ func doVolumeMigration(d *Daemon, poolName string, req *api.StorageVolumesPost)
 	resources := map[string][]string{}
 	resources["storage_volumes"] = []string{fmt.Sprintf("%s/volumes/custom/%s", poolName, req.Name)}
 
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		// And finally run the migration.
 		err = sink.DoStorage(op)
 		if err != nil {
@@ -420,14 +422,14 @@ func doVolumeMigration(d *Daemon, poolName string, req *api.StorageVolumesPost)
 		return nil
 	}
 
-	var op *operation
+	var op *operation.Operation
 	if push {
-		op, err = operationCreate(d.cluster, "", operationClassWebsocket, db.OperationVolumeCreate, resources, sink.Metadata(), run, nil, sink.Connect)
+		op, err = operation.OperationCreate(d.cluster, "", operation.OperationClassWebsocket, db.OperationVolumeCreate, resources, sink.Metadata(), run, nil, sink.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
 	} else {
-		op, err = operationCreate(d.cluster, "", operationClassTask, db.OperationVolumeCopy, resources, nil, run, nil, nil)
+		op, err = operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationVolumeCopy, resources, nil, run, nil, nil)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -438,7 +440,7 @@ func doVolumeMigration(d *Daemon, poolName string, req *api.StorageVolumesPost)
 
 // /1.0/storage-pools/{name}/volumes/{type}/{name}
 // Rename a storage volume of a given volume type in a given storage pool.
-func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string) Response {
+func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string) daemon.Response {
 	// Get the name of the storage volume.
 	var volumeName string
 	fields := strings.Split(mux.Vars(r)["name"], "/")
@@ -542,7 +544,7 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string
 				return InternalError(err)
 			}
 
-			op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationVolumeMigrate, resources, nil, ws.DoStorage, nil, nil)
+			op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationVolumeMigrate, resources, nil, ws.DoStorage, nil, nil)
 			if err != nil {
 				return InternalError(err)
 			}
@@ -551,7 +553,7 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string
 		}
 
 		// Pull mode
-		op, err := operationCreate(d.cluster, "", operationClassWebsocket, db.OperationVolumeMigrate, resources, ws.Metadata(), ws.DoStorage, nil, ws.Connect)
+		op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassWebsocket, db.OperationVolumeMigrate, resources, ws.Metadata(), ws.DoStorage, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -631,11 +633,11 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string
 		return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s", version.APIVersion, poolName, storagePoolVolumeAPIEndpointCustom))
 	}
 
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		return doWork()
 	}
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationVolumeMove, nil, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationVolumeMove, nil, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -643,21 +645,21 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string
 	return OperationResponse(op)
 }
 
-func storagePoolVolumeTypeContainerPost(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeContainerPost(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypePost(d, r, "container")
 }
 
-func storagePoolVolumeTypeCustomPost(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeCustomPost(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypePost(d, r, "custom")
 }
 
-func storagePoolVolumeTypeImagePost(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeImagePost(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypePost(d, r, "image")
 }
 
 // /1.0/storage-pools/{pool}/volumes/{type}/{name}
 // Get storage volume of a given volume type on a given storage pool.
-func storagePoolVolumeTypeGet(d *Daemon, r *http.Request, volumeTypeName string) Response {
+func storagePoolVolumeTypeGet(d *Daemon, r *http.Request, volumeTypeName string) daemon.Response {
 	project := projectParam(r)
 
 	// Get the name of the storage volume.
@@ -724,20 +726,20 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request, volumeTypeName string)
 	return SyncResponseETag(true, volume, etag)
 }
 
-func storagePoolVolumeTypeContainerGet(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeContainerGet(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypeGet(d, r, "container")
 }
 
-func storagePoolVolumeTypeCustomGet(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeCustomGet(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypeGet(d, r, "custom")
 }
 
-func storagePoolVolumeTypeImageGet(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeImageGet(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypeGet(d, r, "image")
 }
 
 // /1.0/storage-pools/{pool}/volumes/{type}/{name}
-func storagePoolVolumeTypePut(d *Daemon, r *http.Request, volumeTypeName string) Response {
+func storagePoolVolumeTypePut(d *Daemon, r *http.Request, volumeTypeName string) daemon.Response {
 	// Get the name of the storage volume.
 	var volumeName string
 	fields := strings.Split(mux.Vars(r)["name"], "/")
@@ -832,20 +834,20 @@ func storagePoolVolumeTypePut(d *Daemon, r *http.Request, volumeTypeName string)
 	return EmptySyncResponse
 }
 
-func storagePoolVolumeTypeContainerPut(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeContainerPut(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypePut(d, r, "container")
 }
 
-func storagePoolVolumeTypeCustomPut(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeCustomPut(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypePut(d, r, "custom")
 }
 
-func storagePoolVolumeTypeImagePut(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeImagePut(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypePut(d, r, "image")
 }
 
 // /1.0/storage-pools/{pool}/volumes/{type}/{name}
-func storagePoolVolumeTypePatch(d *Daemon, r *http.Request, volumeTypeName string) Response {
+func storagePoolVolumeTypePatch(d *Daemon, r *http.Request, volumeTypeName string) daemon.Response {
 	// Get the name of the storage volume.
 	var volumeName string
 	fields := strings.Split(mux.Vars(r)["name"], "/")
@@ -937,20 +939,20 @@ func storagePoolVolumeTypePatch(d *Daemon, r *http.Request, volumeTypeName strin
 	return EmptySyncResponse
 }
 
-func storagePoolVolumeTypeContainerPatch(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeContainerPatch(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypePatch(d, r, "container")
 }
 
-func storagePoolVolumeTypeCustomPatch(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeCustomPatch(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypePatch(d, r, "custom")
 }
 
-func storagePoolVolumeTypeImagePatch(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeImagePatch(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypePatch(d, r, "image")
 }
 
 // /1.0/storage-pools/{pool}/volumes/{type}/{name}
-func storagePoolVolumeTypeDelete(d *Daemon, r *http.Request, volumeTypeName string) Response {
+func storagePoolVolumeTypeDelete(d *Daemon, r *http.Request, volumeTypeName string) daemon.Response {
 	project := projectParam(r)
 
 	// Get the name of the storage volume.
@@ -1063,14 +1065,14 @@ func storagePoolVolumeTypeDelete(d *Daemon, r *http.Request, volumeTypeName stri
 	return EmptySyncResponse
 }
 
-func storagePoolVolumeTypeContainerDelete(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeContainerDelete(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypeDelete(d, r, "container")
 }
 
-func storagePoolVolumeTypeCustomDelete(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeCustomDelete(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypeDelete(d, r, "custom")
 }
 
-func storagePoolVolumeTypeImageDelete(d *Daemon, r *http.Request) Response {
+func storagePoolVolumeTypeImageDelete(d *Daemon, r *http.Request) daemon.Response {
 	return storagePoolVolumeTypeDelete(d, r, "image")
 }

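Two details in doVolumeMigration above are worth calling out. First, push mode registers a websocket-class operation (the remote end connects in via sink.Connect), while pull mode is a plain task-class operation that LXD drives itself. Second, the sink arguments now use the exported instance.MigrationSinkArgs type; abridged from the hunk above, with the remaining fields elided as in the original:

	migrationArgs := instance.MigrationSinkArgs{
		Url: req.Source.Operation,
		Dialer: websocket.Dialer{
			TLSClientConfig: config,
		},
		// ... further fields unchanged from the original code ...
	}
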
From 2b50c532ae79c08157eb5cf7d2914e0e169f7005 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:33:12 +0100
Subject: [PATCH 24/72] lxd/storage: Removes some storage types and functions

These have been moved to the instance package so they can be used outside of the main package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage.go | 163 -------------------------------------------------
 1 file changed, 163 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index b0191ee4aa..da8fe1ef87 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -91,169 +91,6 @@ func readStoragePoolDriversCache() map[string]string {
 	return drivers.(map[string]string)
 }
 
-// storageType defines the type of a storage
-type storageType int
-
-const (
-	storageTypeBtrfs storageType = iota
-	storageTypeCeph
-	storageTypeCephFs
-	storageTypeDir
-	storageTypeLvm
-	storageTypeMock
-	storageTypeZfs
-)
-
-var supportedStoragePoolDrivers = []string{"btrfs", "ceph", "cephfs", "dir", "lvm", "zfs"}
-
-func storageTypeToString(sType storageType) (string, error) {
-	switch sType {
-	case storageTypeBtrfs:
-		return "btrfs", nil
-	case storageTypeCeph:
-		return "ceph", nil
-	case storageTypeCephFs:
-		return "cephfs", nil
-	case storageTypeDir:
-		return "dir", nil
-	case storageTypeLvm:
-		return "lvm", nil
-	case storageTypeMock:
-		return "mock", nil
-	case storageTypeZfs:
-		return "zfs", nil
-	}
-
-	return "", fmt.Errorf("invalid storage type")
-}
-
-func storageStringToType(sName string) (storageType, error) {
-	switch sName {
-	case "btrfs":
-		return storageTypeBtrfs, nil
-	case "ceph":
-		return storageTypeCeph, nil
-	case "cephfs":
-		return storageTypeCephFs, nil
-	case "dir":
-		return storageTypeDir, nil
-	case "lvm":
-		return storageTypeLvm, nil
-	case "mock":
-		return storageTypeMock, nil
-	case "zfs":
-		return storageTypeZfs, nil
-	}
-
-	return -1, fmt.Errorf("invalid storage type name")
-}
-
-// The storage interface defines the functions needed to implement a storage
-// backend for a given storage driver.
-type storage interface {
-	// Functions dealing with basic driver properties only.
-	StorageCoreInit() error
-	GetStorageType() storageType
-	GetStorageTypeName() string
-	GetStorageTypeVersion() string
-	GetState() *state.State
-
-	// Functions dealing with storage pools.
-	StoragePoolInit() error
-	StoragePoolCheck() error
-	StoragePoolCreate() error
-	StoragePoolDelete() error
-	StoragePoolMount() (bool, error)
-	StoragePoolUmount() (bool, error)
-	StoragePoolResources() (*api.ResourcesStoragePool, error)
-	StoragePoolUpdate(writable *api.StoragePoolPut, changedConfig []string) error
-	GetStoragePoolWritable() api.StoragePoolPut
-	SetStoragePoolWritable(writable *api.StoragePoolPut)
-	GetStoragePool() *api.StoragePool
-
-	// Functions dealing with custom storage volumes.
-	StoragePoolVolumeCreate() error
-	StoragePoolVolumeDelete() error
-	StoragePoolVolumeMount() (bool, error)
-	StoragePoolVolumeUmount() (bool, error)
-	StoragePoolVolumeUpdate(writable *api.StorageVolumePut, changedConfig []string) error
-	StoragePoolVolumeRename(newName string) error
-	StoragePoolVolumeCopy(source *api.StorageVolumeSource) error
-	GetStoragePoolVolumeWritable() api.StorageVolumePut
-	SetStoragePoolVolumeWritable(writable *api.StorageVolumePut)
-	GetStoragePoolVolume() *api.StorageVolume
-
-	// Functions dealing with custom storage volume snapshots.
-	StoragePoolVolumeSnapshotCreate(target *api.StorageVolumeSnapshotsPost) error
-	StoragePoolVolumeSnapshotDelete() error
-	StoragePoolVolumeSnapshotRename(newName string) error
-
-	// Functions dealing with container storage volumes.
-	// ContainerCreate creates an empty container (no rootfs/metadata.yaml)
-	ContainerCreate(container Instance) error
-
-	// ContainerCreateFromImage creates a container from a image.
-	ContainerCreateFromImage(c Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error
-	ContainerDelete(c Instance) error
-	ContainerCopy(target Instance, source Instance, containerOnly bool) error
-	ContainerRefresh(target Instance, source Instance, snapshots []Instance) error
-	ContainerMount(c Instance) (bool, error)
-	ContainerUmount(c Instance, path string) (bool, error)
-	ContainerRename(container Instance, newName string) error
-	ContainerRestore(container Instance, sourceContainer Instance) error
-	ContainerGetUsage(container Instance) (int64, error)
-	GetContainerPoolInfo() (int64, string, string)
-	ContainerStorageReady(container Instance) bool
-
-	ContainerSnapshotCreate(target Instance, source Instance) error
-	ContainerSnapshotDelete(c Instance) error
-	ContainerSnapshotRename(c Instance, newName string) error
-	ContainerSnapshotStart(c Instance) (bool, error)
-	ContainerSnapshotStop(c Instance) (bool, error)
-
-	ContainerBackupCreate(backup backup, sourceContainer Instance) error
-	ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error
-
-	// For use in migrating snapshots.
-	ContainerSnapshotCreateEmpty(c Instance) error
-
-	// Functions dealing with image storage volumes.
-	ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error
-	ImageDelete(fingerprint string) error
-
-	// Storage type agnostic functions.
-	StorageEntitySetQuota(volumeType int, size int64, data interface{}) error
-
-	// Functions dealing with migration.
-	MigrationType() migration.MigrationFSType
-	// Does this storage backend preserve inodes when it is moved across LXD
-	// hosts?
-	PreservesInodes() bool
-
-	// Get the pieces required to migrate the source. This contains a list
-	// of the "object" (i.e. container or snapshot, depending on whether or
-	// not it is a snapshot name) to be migrated in order, and a channel
-	// for arguments of the specific migration command. We use a channel
-	// here so we don't have to invoke `zfs send` or `rsync` or whatever
-	// and keep its stdin/stdout open for each snapshot during the course
-	// of migration, we can do it lazily.
-	//
-	// N.B. that the order here important: e.g. in btrfs/zfs, snapshots
-	// which are parents of other snapshots should be sent first, to save
-	// as much transfer as possible. However, the base container is always
-	// sent as the first object, since that is the grandparent of every
-	// snapshot.
-	//
-	// We leave sending containers which are snapshots of other containers
-	// already present on the target instance as an exercise for the
-	// enterprising developer.
-	MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error)
-	MigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error
-
-	StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error)
-	StorageMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error
-}
-
 func storageCoreInit(driver string) (storage, error) {
 	sType, err := storageStringToType(driver)
 	if err != nil {

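With the interface gone from the main package, call sites refer to it by its exported home, instance.Storage, as the signature updates earlier in this series show. A minimal sketch of the resulting shape (storagePoolInit is the updated initialiser from PATCH 26):

	pool, err := storagePoolInit(d.State(), poolName) // pool is an instance.Storage
	if err != nil {
		return err
	}

	_, err = pool.StoragePoolMount()
	if err != nil {
		return err
	}
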
From 3b3b1d8e0ce09649d65549b358e2c8ed3194c8b6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:34:21 +0100
Subject: [PATCH 25/72] lxd/storage: Links storagePoolVolumeContainerCreateInit
 and storagePoolVolumeContainerLoadInit to instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index da8fe1ef87..8442bac168 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -33,7 +33,9 @@ func init() {
 	device.StorageVolumeUmount = storageVolumeUmount
 	// Expose storageRootFSApplyQuota to the device package as StorageRootFSApplyQuota.
 	device.StorageRootFSApplyQuota = storageRootFSApplyQuota
-
+	// Expose storagePoolVolumeContainerCreateInit and storagePoolVolumeContainerLoadInit to the instance package.
+	instance.StoragePoolVolumeContainerCreateInit = storagePoolVolumeContainerCreateInit
+	instance.StoragePoolVolumeContainerLoadInit = storagePoolVolumeContainerLoadInit
 }
 
 // lxdStorageLockMap is a hashmap that allows functions to check whether the

From 7f5733d8caa5d7ec270953f33b861de38d45daf9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:34:47 +0100
Subject: [PATCH 26/72] lxd/storage: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage.go | 95 +++++++++++++++++++++++++-------------------------
 1 file changed, 48 insertions(+), 47 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 8442bac168..da5d279a89 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -8,13 +8,13 @@ import (
 	"sync"
 	"sync/atomic"
 
-	"github.com/gorilla/websocket"
 	"github.com/pkg/errors"
 
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/device"
 	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/state"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
@@ -93,56 +93,56 @@ func readStoragePoolDriversCache() map[string]string {
 	return drivers.(map[string]string)
 }
 
-func storageCoreInit(driver string) (storage, error) {
-	sType, err := storageStringToType(driver)
+func storageCoreInit(driver string) (instance.Storage, error) {
+	sType, err := instance.StorageStringToType(driver)
 	if err != nil {
 		return nil, err
 	}
 
 	switch sType {
-	case storageTypeBtrfs:
+	case instance.StorageTypeBtrfs:
 		btrfs := storageBtrfs{}
 		err = btrfs.StorageCoreInit()
 		if err != nil {
 			return nil, err
 		}
 		return &btrfs, nil
-	case storageTypeDir:
+	case instance.StorageTypeDir:
 		dir := storageDir{}
 		err = dir.StorageCoreInit()
 		if err != nil {
 			return nil, err
 		}
 		return &dir, nil
-	case storageTypeCeph:
+	case instance.StorageTypeCeph:
 		ceph := storageCeph{}
 		err = ceph.StorageCoreInit()
 		if err != nil {
 			return nil, err
 		}
 		return &ceph, nil
-	case storageTypeCephFs:
+	case instance.StorageTypeCephFs:
 		cephfs := storageCephFs{}
 		err = cephfs.StorageCoreInit()
 		if err != nil {
 			return nil, err
 		}
 		return &cephfs, nil
-	case storageTypeLvm:
+	case instance.StorageTypeLvm:
 		lvm := storageLvm{}
 		err = lvm.StorageCoreInit()
 		if err != nil {
 			return nil, err
 		}
 		return &lvm, nil
-	case storageTypeMock:
+	case instance.StorageTypeMock:
 		mock := storageMock{}
 		err = mock.StorageCoreInit()
 		if err != nil {
 			return nil, err
 		}
 		return &mock, nil
-	case storageTypeZfs:
+	case instance.StorageTypeZfs:
 		zfs := storageZfs{}
 		err = zfs.StorageCoreInit()
 		if err != nil {
@@ -154,7 +154,7 @@ func storageCoreInit(driver string) (storage, error) {
 	return nil, fmt.Errorf("invalid storage type")
 }
 
-func storageInit(s *state.State, project, poolName, volumeName string, volumeType int) (storage, error) {
+func storageInit(s *state.State, project, poolName, volumeName string, volumeType int) (instance.Storage, error) {
 	// Load the storage pool.
 	poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
 	if err != nil {
@@ -178,13 +178,13 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 		}
 	}
 
-	sType, err := storageStringToType(driver)
+	sType, err := instance.StorageStringToType(driver)
 	if err != nil {
 		return nil, err
 	}
 
 	switch sType {
-	case storageTypeBtrfs:
+	case instance.StorageTypeBtrfs:
 		btrfs := storageBtrfs{}
 		btrfs.poolID = poolID
 		btrfs.pool = pool
@@ -195,7 +195,7 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 			return nil, err
 		}
 		return &btrfs, nil
-	case storageTypeDir:
+	case instance.StorageTypeDir:
 		dir := storageDir{}
 		dir.poolID = poolID
 		dir.pool = pool
@@ -207,7 +207,7 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 			return nil, err
 		}
 		return &dir, nil
-	case storageTypeCeph:
+	case instance.StorageTypeCeph:
 		ceph := storageCeph{}
 		ceph.poolID = poolID
 		ceph.pool = pool
@@ -218,7 +218,7 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 			return nil, err
 		}
 		return &ceph, nil
-	case storageTypeCephFs:
+	case instance.StorageTypeCephFs:
 		cephfs := storageCephFs{}
 		cephfs.poolID = poolID
 		cephfs.pool = pool
@@ -229,7 +229,7 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 			return nil, err
 		}
 		return &cephfs, nil
-	case storageTypeLvm:
+	case instance.StorageTypeLvm:
 		lvm := storageLvm{}
 		lvm.poolID = poolID
 		lvm.pool = pool
@@ -240,7 +240,7 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 			return nil, err
 		}
 		return &lvm, nil
-	case storageTypeMock:
+	case instance.StorageTypeMock:
 		mock := storageMock{}
 		mock.poolID = poolID
 		mock.pool = pool
@@ -251,7 +251,7 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 			return nil, err
 		}
 		return &mock, nil
-	case storageTypeZfs:
+	case instance.StorageTypeZfs:
 		zfs := storageZfs{}
 		zfs.poolID = poolID
 		zfs.pool = pool
@@ -267,11 +267,11 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 	return nil, fmt.Errorf("invalid storage type")
 }
 
-func storagePoolInit(s *state.State, poolName string) (storage, error) {
+func storagePoolInit(s *state.State, poolName string) (instance.Storage, error) {
 	return storageInit(s, "default", poolName, "", -1)
 }
 
-func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName string, volumeType int, c container) (storage, error) {
+func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName string, volumeType int, c container) (instance.Storage, error) {
 	st, err := storageInit(s, "default", poolName, volumeName, volumeType)
 	if err != nil {
 		return nil, err
@@ -288,7 +288,7 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 	// Get the on-disk idmap for the volume
 	var lastIdmap *idmap.IdmapSet
 	if poolVolumePut.Config["volatile.idmap.last"] != "" {
-		lastIdmap, err = idmapsetFromString(poolVolumePut.Config["volatile.idmap.last"])
+		lastIdmap, err = instance.IDMapsetFromString(poolVolumePut.Config["volatile.idmap.last"])
 		if err != nil {
 			logger.Errorf("Failed to unmarshal last idmapping: %s", poolVolumePut.Config["volatile.idmap.last"])
 			return nil, err
@@ -309,7 +309,7 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 		}
 
 		if nextIdmap != nil {
-			nextJsonMap, err = idmapsetToJSON(nextIdmap)
+			nextJsonMap, err = instance.IDMapsetToJSON(nextIdmap)
 			if err != nil {
 				return nil, err
 			}
@@ -331,12 +331,12 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 
 			if len(volumeUsedBy) > 1 {
 				for _, ctName := range volumeUsedBy {
-					instt, err := instanceLoadByProjectAndName(s, c.Project(), ctName)
+					instt, err := instance.InstanceLoadByProjectAndName(s, c.Project(), ctName)
 					if err != nil {
 						continue
 					}
 
-					if instt.Type() != instance.TypeContainer {
+					if instt.Type() != instancetype.Container {
 						continue
 					}
 
@@ -384,8 +384,8 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 		if lastIdmap != nil {
 			var err error
 
-			if st.GetStorageType() == storageTypeZfs {
-				err = lastIdmap.UnshiftRootfs(remapPath, zfsIdmapSetSkipper)
+			if st.GetStorageType() == instance.StorageTypeZfs {
+				err = lastIdmap.UnshiftRootfs(remapPath, driver.ZFSIdmapSetSkipper)
 			} else {
 				err = lastIdmap.UnshiftRootfs(remapPath, nil)
 			}
@@ -400,8 +400,8 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 		if nextIdmap != nil {
 			var err error
 
-			if st.GetStorageType() == storageTypeZfs {
-				err = nextIdmap.ShiftRootfs(remapPath, zfsIdmapSetSkipper)
+			if st.GetStorageType() == instance.StorageTypeZfs {
+				err = nextIdmap.ShiftRootfs(remapPath, driver.ZFSIdmapSetSkipper)
 			} else {
 				err = nextIdmap.ShiftRootfs(remapPath, nil)
 			}
@@ -417,7 +417,7 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 	jsonIdmap := "[]"
 	if nextIdmap != nil {
 		var err error
-		jsonIdmap, err = idmapsetToJSON(nextIdmap)
+		jsonIdmap, err = instance.IDMapsetToJSON(nextIdmap)
 		if err != nil {
 			logger.Errorf("Failed to marshal idmap")
 			return nil, err
@@ -441,20 +441,20 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 	return st, nil
 }
 
-func storagePoolVolumeInit(s *state.State, project, poolName, volumeName string, volumeType int) (storage, error) {
+func storagePoolVolumeInit(s *state.State, project, poolName, volumeName string, volumeType int) (instance.Storage, error) {
 	// No need to detect storage here, it's a new container.
 	return storageInit(s, project, poolName, volumeName, volumeType)
 }
 
-func storagePoolVolumeImageInit(s *state.State, poolName string, imageFingerprint string) (storage, error) {
+func storagePoolVolumeImageInit(s *state.State, poolName string, imageFingerprint string) (instance.Storage, error) {
 	return storagePoolVolumeInit(s, "default", poolName, imageFingerprint, storagePoolVolumeTypeImage)
 }
 
-func storagePoolVolumeContainerCreateInit(s *state.State, project string, poolName string, containerName string) (storage, error) {
+func storagePoolVolumeContainerCreateInit(s *state.State, project string, poolName string, containerName string) (instance.Storage, error) {
 	return storagePoolVolumeInit(s, project, poolName, containerName, storagePoolVolumeTypeContainer)
 }
 
-func storagePoolVolumeContainerLoadInit(s *state.State, project, containerName string) (storage, error) {
+func storagePoolVolumeContainerLoadInit(s *state.State, project, containerName string) (instance.Storage, error) {
 	// Get the storage pool of a given container.
 	poolName, err := s.Cluster.ContainerPool(project, containerName)
 	if err != nil {
@@ -578,8 +578,8 @@ func resetContainerDiskIdmap(container container, srcIdmap *idmap.IdmapSet) erro
 	return nil
 }
 
-func progressWrapperRender(op *operation, key string, description string, progressInt int64, speedInt int64) {
-	meta := op.metadata
+func progressWrapperRender(op *operation.Operation, key string, description string, progressInt int64, speedInt int64) {
+	meta := op.Metadata
 	if meta == nil {
 		meta = make(map[string]interface{})
 	}
@@ -596,7 +596,7 @@ func progressWrapperRender(op *operation, key string, description string, progre
 }
 
 // StorageProgressReader reports the read progress.
-func StorageProgressReader(op *operation, key string, description string) func(io.ReadCloser) io.ReadCloser {
+func StorageProgressReader(op *operation.Operation, key string, description string) func(io.ReadCloser) io.ReadCloser {
 	return func(reader io.ReadCloser) io.ReadCloser {
 		if op == nil {
 			return reader
@@ -618,7 +618,7 @@ func StorageProgressReader(op *operation, key string, description string) func(i
 }
 
 // StorageProgressWriter reports the write progress.
-func StorageProgressWriter(op *operation, key string, description string) func(io.WriteCloser) io.WriteCloser {
+func StorageProgressWriter(op *operation.Operation, key string, description string) func(io.WriteCloser) io.WriteCloser {
 	return func(writer io.WriteCloser) io.WriteCloser {
 		if op == nil {
 			return writer
@@ -733,8 +733,8 @@ func storagePoolDriversCacheUpdate(cluster *db.Cluster) {
 
 // storageVolumeMount initialises a new storage interface and checks the pool and volume are
 // mounted. If they are not then they are mounted.
-func storageVolumeMount(state *state.State, poolName string, volumeName string, volumeTypeName string, instance device.InstanceIdentifier) error {
-	c, ok := instance.(*containerLXC)
+func storageVolumeMount(state *state.State, poolName string, volumeName string, volumeTypeName string, inst device.InstanceIdentifier) error {
+	c, ok := inst.(*instance.ContainerLXC)
 	if !ok {
 		return fmt.Errorf("Received non-LXC container instance")
 	}
@@ -771,26 +771,27 @@ func storageVolumeUmount(state *state.State, poolName string, volumeName string,
 
 // storageRootFSApplyQuota applies a quota to an instance if it can, if it cannot then it will
 // return false indicating that the quota needs to be stored in volatile to be applied on next boot.
-func storageRootFSApplyQuota(instance device.InstanceIdentifier, newSizeBytes int64) (bool, error) {
-	c, ok := instance.(*containerLXC)
+func storageRootFSApplyQuota(inst device.InstanceIdentifier, newSizeBytes int64) (bool, error) {
+	c, ok := inst.(*instance.ContainerLXC)
 	if !ok {
 		return false, fmt.Errorf("Received non-LXC container instance")
 	}
 
-	err := c.initStorage()
+	err := c.InitStorage()
 	if err != nil {
 		return false, errors.Wrap(err, "Initialize storage")
 	}
 
-	storageTypeName := c.storage.GetStorageTypeName()
-	storageIsReady := c.storage.ContainerStorageReady(c)
+	storage := c.Storage()
+	storageTypeName := storage.GetStorageTypeName()
+	storageIsReady := storage.ContainerStorageReady(c)
 
 	// If we cannot apply the quota now, then return false as needs to be applied on next boot.
 	if (storageTypeName == "lvm" || storageTypeName == "ceph") && c.IsRunning() || !storageIsReady {
 		return false, nil
 	}
 
-	err = c.storage.StorageEntitySetQuota(storagePoolVolumeTypeContainer, newSizeBytes, c)
+	err = storage.StorageEntitySetQuota(storagePoolVolumeTypeContainer, newSizeBytes, c)
 	if err != nil {
 		return false, errors.Wrap(err, "Set storage quota")
 	}

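Note the last hunk: the container's storage field is no longer reachable from the main package, so the quota path goes through exported accessors on instance.ContainerLXC. A sketch of the shape those accessors are assumed to take on the instance side (the real methods live in lxd/instance and are not part of this patch):

	// InitStorage initialises the container's storage backend on demand.
	func (c *ContainerLXC) InitStorage() error {
		return c.initStorage()
	}

	// Storage returns the container's storage backend.
	func (c *ContainerLXC) Storage() Storage {
		return c.storage
	}
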
From f21fff231f7179a4ba8c3b5b8fb94b0a02beaaef Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:35:34 +0100
Subject: [PATCH 27/72] lxd/storage/utils: Moves some functions to storage
 package

So they can be used outside of the main package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/utils.go | 89 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index d23d4f4171..fa6161f2a1 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -3,6 +3,7 @@ package storage
 import (
 	"fmt"
 	"os"
+	"path/filepath"
 	"strings"
 	"time"
 
@@ -360,3 +361,91 @@ func GetStorageResource(path string) (*api.ResourcesStoragePool, error) {
 
 	return &res, nil
 }
+
+// ZFSIdmapSetSkipper returns whether the given path should be skipped when shifting idmaps; it skips the ZFS .zfs/snapshot special directory.
+func ZFSIdmapSetSkipper(dir string, absPath string, fi os.FileInfo) bool {
+	strippedPath := absPath
+	if dir != "" {
+		strippedPath = absPath[len(dir):]
+	}
+
+	if fi.IsDir() && strippedPath == "/.zfs/snapshot" {
+		return true
+	}
+
+	return false
+}
+
+// BTRFSSubVolumesGet returns the btrfs subvolumes found under path, as paths relative to it.
+func BTRFSSubVolumesGet(path string) ([]string, error) {
+	result := []string{}
+
+	if !strings.HasSuffix(path, "/") {
+		path = path + "/"
+	}
+
+	// Unprivileged users can't get to fs internals
+	filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error {
+		// Skip walk errors
+		if err != nil {
+			return nil
+		}
+
+		// Ignore the base path
+		if strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/") {
+			return nil
+		}
+
+		// Subvolumes can only be directories
+		if !fi.IsDir() {
+			return nil
+		}
+
+		// Check if a btrfs subvolume
+		if IsBtrfsSubVolume(fpath) {
+			result = append(result, strings.TrimPrefix(fpath, path))
+		}
+
+		return nil
+	})
+
+	return result, nil
+}
+
+// IsBtrfsSubVolume returns true if the given path is a btrfs subvolume, else false.
+func IsBtrfsSubVolume(subvolPath string) bool {
+	fs := unix.Stat_t{}
+	err := unix.Lstat(subvolPath, &fs)
+	if err != nil {
+		return false
+	}
+
+	// Check if BTRFS_FIRST_FREE_OBJECTID
+	if fs.Ino != 256 {
+		return false
+	}
+
+	return true
+}
+
+// BTRFSSubVolumeIsRo returns whether the subvolume at path is read-only.
+func BTRFSSubVolumeIsRo(path string) bool {
+	output, err := shared.RunCommand("btrfs", "property", "get", "-ts", path)
+	if err != nil {
+		return false
+	}
+
+	return strings.HasPrefix(string(output), "ro=true")
+}
+
+// BTRFSSubVolumeMakeRo marks the subvolume at path as read-only.
+func BTRFSSubVolumeMakeRo(path string) error {
+	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "true")
+	return err
+}
+
+// BTRFSSubVolumeMakeRw marks the subvolume at path as read-write.
+func BTRFSSubVolumeMakeRw(path string) error {
+	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "false")
+	return err
+}

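A typical use of the relocated helpers is to walk a container's subvolumes and flip any read-only ones back to read-write before deletion. A hypothetical caller (the helpers are exactly those added above; containerPath is illustrative):

	subvols, err := driver.BTRFSSubVolumesGet(containerPath)
	if err != nil {
		return err
	}

	for _, subvol := range subvols {
		p := filepath.Join(containerPath, subvol)
		if driver.BTRFSSubVolumeIsRo(p) {
			err = driver.BTRFSSubVolumeMakeRw(p)
			if err != nil {
				return err
			}
		}
	}
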
From 77840dbd5f4822f9d8fff377da2da234c878d45b Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:37:11 +0100
Subject: [PATCH 28/72] lxd/storage/btrfs: Removes some helper functions

These have been moved to the storage package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_btrfs.go | 71 --------------------------------------------
 1 file changed, 71 deletions(-)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index f339b710a3..103bad5924 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -2335,23 +2335,6 @@ func (s *storageBtrfs) btrfsPoolVolumesSnapshot(source string, dest string, read
 	return nil
 }
 
-// isBtrfsSubVolume returns true if the given Path is a btrfs subvolume else
-// false.
-func isBtrfsSubVolume(subvolPath string) bool {
-	fs := unix.Stat_t{}
-	err := unix.Lstat(subvolPath, &fs)
-	if err != nil {
-		return false
-	}
-
-	// Check if BTRFS_FIRST_FREE_OBJECTID
-	if fs.Ino != 256 {
-		return false
-	}
-
-	return true
-}
-
 func isBtrfsFilesystem(path string) bool {
 	_, err := shared.RunCommand("btrfs", "filesystem", "show", path)
 	if err != nil {
@@ -2376,60 +2359,6 @@ func isOnBtrfs(path string) bool {
 	return true
 }
 
-func btrfsSubVolumeIsRo(path string) bool {
-	output, err := shared.RunCommand("btrfs", "property", "get", "-ts", path)
-	if err != nil {
-		return false
-	}
-
-	return strings.HasPrefix(string(output), "ro=true")
-}
-
-func btrfsSubVolumeMakeRo(path string) error {
-	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "true")
-	return err
-}
-
-func btrfsSubVolumeMakeRw(path string) error {
-	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "false")
-	return err
-}
-
-func btrfsSubVolumesGet(path string) ([]string, error) {
-	result := []string{}
-
-	if !strings.HasSuffix(path, "/") {
-		path = path + "/"
-	}
-
-	// Unprivileged users can't get to fs internals
-	filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error {
-		// Skip walk errors
-		if err != nil {
-			return nil
-		}
-
-		// Ignore the base path
-		if strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/") {
-			return nil
-		}
-
-		// Subvolumes can only be directories
-		if !fi.IsDir() {
-			return nil
-		}
-
-		// Check if a btrfs subvolume
-		if isBtrfsSubVolume(fpath) {
-			result = append(result, strings.TrimPrefix(fpath, path))
-		}
-
-		return nil
-	})
-
-	return result, nil
-}
-
 func (s *storageBtrfs) MigrationType() migration.MigrationFSType {
 	if s.s.OS.RunningInUserNS {
 		return migration.MigrationFSType_RSYNC

From fe360a95f7787c37990673d2dd2d10bea455c154 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:38:36 +0100
Subject: [PATCH 29/72] lxd/storage/migration: Removes
 MigrationStorageSourceDriver interface

This has been moved to the instance package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_migration.go | 23 -----------------------
 1 file changed, 23 deletions(-)

diff --git a/lxd/storage_migration.go b/lxd/storage_migration.go
index 8d329cd5fa..3b1d42ec22 100644
--- a/lxd/storage_migration.go
+++ b/lxd/storage_migration.go
@@ -17,29 +17,6 @@ import (
 	"github.com/lxc/lxd/shared/logger"
 )
 
-// MigrationStorageSourceDriver defines the functions needed to implement a
-// migration source driver.
-type MigrationStorageSourceDriver interface {
-	/* send any bits of the container/snapshots that are possible while the
-	 * container is still running.
-	 */
-	SendWhileRunning(conn *websocket.Conn, op *operation, bwlimit string, containerOnly bool) error
-
-	/* send the final bits (e.g. a final delta snapshot for zfs, btrfs, or
-	 * do a final rsync) of the fs after the container has been
-	 * checkpointed. This will only be called when a container is actually
-	 * being live migrated.
-	 */
-	SendAfterCheckpoint(conn *websocket.Conn, bwlimit string) error
-
-	/* Called after either success or failure of a migration, can be used
-	 * to clean up any temporary snapshots, etc.
-	 */
-	Cleanup()
-
-	SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error
-}
-
 type rsyncStorageSourceDriver struct {
 	container     Instance
 	snapshots     []Instance

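As with the storage interface in PATCH 24, consumers are expected to reference the source-driver interface through the instance package from here on (presumably as instance.MigrationStorageSourceDriver; the exported name is an assumption, since this patch only shows the removal). A sketch of a call site under that assumption:

	// pool is an instance.Storage; args are the migration source arguments.
	src, err := pool.MigrationSource(args)
	if err != nil {
		return err
	}
	defer src.Cleanup()
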
From 6dbbba53668f82c3db017792f2b71a008b80c645 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:40:00 +0100
Subject: [PATCH 30/72] lxd/storage*: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_btrfs.go           | 110 +++++++++++++++++----------------
 lxd/storage_ceph.go            |  64 +++++++++----------
 lxd/storage_ceph_utils.go      |   7 ++-
 lxd/storage_cephfs.go          |  54 ++++++++--------
 lxd/storage_dir.go             |  70 +++++++++++----------
 lxd/storage_lvm.go             |  64 +++++++++----------
 lxd/storage_lvm_utils.go       |  19 +++---
 lxd/storage_migration.go       |  38 ++++++------
 lxd/storage_migration_btrfs.go |  10 +--
 lxd/storage_migration_ceph.go  |  13 ++--
 lxd/storage_migration_zfs.go   |  10 +--
 lxd/storage_mock.go            |  52 ++++++++--------
 lxd/storage_pools.go           |  15 ++---
 lxd/storage_pools_config.go    |   3 +-
 lxd/storage_shared.go          |   5 +-
 lxd/storage_utils.go           |   3 +-
 16 files changed, 281 insertions(+), 256 deletions(-)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 103bad5924..ebac01b454 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -16,7 +16,9 @@ import (
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/lxd/state"
 	driver "github.com/lxc/lxd/lxd/storage"
@@ -73,8 +75,8 @@ func (s *storageBtrfs) getCustomSnapshotSubvolumePath(poolName string) string {
 }
 
 func (s *storageBtrfs) StorageCoreInit() error {
-	s.sType = storageTypeBtrfs
-	typeName, err := storageTypeToString(s.sType)
+	s.sType = instance.StorageTypeBtrfs
+	typeName, err := instance.StorageTypeToString(s.sType)
 	if err != nil {
 		return err
 	}
@@ -173,8 +175,8 @@ func (s *storageBtrfs) StoragePoolCreate() error {
 					return fmt.Errorf("Failed to create the BTRFS pool: %s", output)
 				}
 			} else {
-				if isBtrfsSubVolume(source) {
-					subvols, err := btrfsSubVolumesGet(source)
+				if driver.IsBtrfsSubVolume(source) {
+					subvols, err := driver.BTRFSSubVolumesGet(source)
 					if err != nil {
 						return fmt.Errorf("Could not determine if existing BTRFS subvolume ist empty: %s", err)
 					}
@@ -339,7 +341,7 @@ func (s *storageBtrfs) StoragePoolDelete() error {
 			// This is a loop file so simply remove it.
 			err = os.Remove(source)
 		} else {
-			if !isBtrfsFilesystem(source) && isBtrfsSubVolume(source) {
+			if !isBtrfsFilesystem(source) && driver.IsBtrfsSubVolume(source) {
 				err = btrfsSubVolumesDelete(source)
 			}
 		}
@@ -610,7 +612,7 @@ func (s *storageBtrfs) StoragePoolVolumeDelete() error {
 
 	// Delete subvolume.
 	customSubvolumeName := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	if shared.PathExists(customSubvolumeName) && isBtrfsSubVolume(customSubvolumeName) {
+	if shared.PathExists(customSubvolumeName) && driver.IsBtrfsSubVolume(customSubvolumeName) {
 		err = btrfsSubVolumesDelete(customSubvolumeName)
 		if err != nil {
 			return err
@@ -800,9 +802,9 @@ func (s *storageBtrfs) StoragePoolVolumeRename(newName string) error {
 }
 
 // Functions dealing with container storage.
-func (s *storageBtrfs) ContainerStorageReady(container Instance) bool {
+func (s *storageBtrfs) ContainerStorageReady(container instance.Instance) bool {
 	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	return isBtrfsSubVolume(containerMntPoint)
+	return driver.IsBtrfsSubVolume(containerMntPoint)
 }
 
 func (s *storageBtrfs) doContainerCreate(projectName, name string, privileged bool) error {
@@ -845,7 +847,7 @@ func (s *storageBtrfs) doContainerCreate(projectName, name string, privileged bo
 	return nil
 }
 
-func (s *storageBtrfs) ContainerCreate(container Instance) error {
+func (s *storageBtrfs) ContainerCreate(container instance.Instance) error {
 	err := s.doContainerCreate(container.Project(), container.Name(), container.IsPrivileged())
 	if err != nil {
 		return err
@@ -855,7 +857,7 @@ func (s *storageBtrfs) ContainerCreate(container Instance) error {
 }
 
 // And this function is why I started hating on btrfs...
-func (s *storageBtrfs) ContainerCreateFromImage(container Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
+func (s *storageBtrfs) ContainerCreateFromImage(container instance.Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
 	logger.Debugf("Creating BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	source := s.pool.Config["source"]
@@ -897,7 +899,7 @@ func (s *storageBtrfs) ContainerCreateFromImage(container Instance, fingerprint
 		lxdStorageMapLock.Unlock()
 
 		var imgerr error
-		if !shared.PathExists(imageMntPoint) || !isBtrfsSubVolume(imageMntPoint) {
+		if !shared.PathExists(imageMntPoint) || !driver.IsBtrfsSubVolume(imageMntPoint) {
 			imgerr = s.ImageCreate(fingerprint, tracker)
 		}
 
@@ -938,7 +940,7 @@ func (s *storageBtrfs) ContainerCreateFromImage(container Instance, fingerprint
 	return nil
 }
 
-func (s *storageBtrfs) ContainerDelete(container Instance) error {
+func (s *storageBtrfs) ContainerDelete(container instance.Instance) error {
 	logger.Debugf("Deleting BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	// The storage pool needs to be mounted.
@@ -949,7 +951,7 @@ func (s *storageBtrfs) ContainerDelete(container Instance) error {
 
 	// Delete the subvolume.
 	containerSubvolumeName := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	if shared.PathExists(containerSubvolumeName) && isBtrfsSubVolume(containerSubvolumeName) {
+	if shared.PathExists(containerSubvolumeName) && driver.IsBtrfsSubVolume(containerSubvolumeName) {
 		err = btrfsSubVolumesDelete(containerSubvolumeName)
 		if err != nil {
 			return err
@@ -985,7 +987,7 @@ func (s *storageBtrfs) ContainerDelete(container Instance) error {
 	return nil
 }
 
-func (s *storageBtrfs) copyContainer(target Instance, source Instance) error {
+func (s *storageBtrfs) copyContainer(target instance.Instance, source instance.Instance) error {
 	sourceContainerSubvolumeName := driver.GetContainerMountPoint(source.Project(), s.pool.Name, source.Name())
 	if source.IsSnapshot() {
 		sourceContainerSubvolumeName = driver.GetSnapshotMountPoint(source.Project(), s.pool.Name, source.Name())
@@ -1019,7 +1021,7 @@ func (s *storageBtrfs) copyContainer(target Instance, source Instance) error {
 	return nil
 }
 
-func (s *storageBtrfs) copySnapshot(target Instance, source Instance) error {
+func (s *storageBtrfs) copySnapshot(target instance.Instance, source instance.Instance) error {
 	sourceName := source.Name()
 	targetName := target.Name()
 	sourceContainerSubvolumeName := driver.GetSnapshotMountPoint(source.Project(), s.pool.Name, sourceName)
@@ -1050,7 +1052,7 @@ func (s *storageBtrfs) copySnapshot(target Instance, source Instance) error {
 	return nil
 }
 
-func (s *storageBtrfs) doCrossPoolContainerCopy(target Instance, source Instance, containerOnly bool, refresh bool, refreshSnapshots []Instance) error {
+func (s *storageBtrfs) doCrossPoolContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
 	sourcePool, err := source.StoragePool()
 	if err != nil {
 		return err
@@ -1075,7 +1077,7 @@ func (s *storageBtrfs) doCrossPoolContainerCopy(target Instance, source Instance
 		return err
 	}
 
-	var snapshots []Instance
+	var snapshots []instance.Instance
 
 	if refresh {
 		snapshots = refreshSnapshots
@@ -1122,7 +1124,7 @@ func (s *storageBtrfs) doCrossPoolContainerCopy(target Instance, source Instance
 	return nil
 }
 
-func (s *storageBtrfs) ContainerCopy(target Instance, source Instance, containerOnly bool) error {
+func (s *storageBtrfs) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
 	logger.Debugf("Copying BTRFS container storage %s to %s", source.Name(), target.Name())
 
 	// The storage pool needs to be mounted.
@@ -1166,14 +1168,14 @@ func (s *storageBtrfs) ContainerCopy(target Instance, source Instance, container
 	}
 
 	for _, snap := range snapshots {
-		sourceSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), snap.Name())
+		sourceSnapshot, err := instance.InstanceLoadByProjectAndName(s.s, source.Project(), snap.Name())
 		if err != nil {
 			return err
 		}
 
 		_, snapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(snap.Name())
 		newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
-		targetSnapshot, err := instanceLoadByProjectAndName(s.s, target.Project(), newSnapName)
+		targetSnapshot, err := instance.InstanceLoadByProjectAndName(s.s, target.Project(), newSnapName)
 		if err != nil {
 			return err
 		}
@@ -1188,7 +1190,7 @@ func (s *storageBtrfs) ContainerCopy(target Instance, source Instance, container
 	return nil
 }
 
-func (s *storageBtrfs) ContainerRefresh(target Instance, source Instance, snapshots []Instance) error {
+func (s *storageBtrfs) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
 	logger.Debugf("Refreshing BTRFS container storage for %s from %s", target.Name(), source.Name())
 
 	// The storage pool needs to be mounted.
@@ -1208,7 +1210,7 @@ func (s *storageBtrfs) ContainerRefresh(target Instance, source Instance, snapsh
 	return s.doCrossPoolContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
 }
 
-func (s *storageBtrfs) ContainerMount(c Instance) (bool, error) {
+func (s *storageBtrfs) ContainerMount(c instance.Instance) (bool, error) {
 	logger.Debugf("Mounting BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	// The storage pool must be mounted.
@@ -1221,11 +1223,11 @@ func (s *storageBtrfs) ContainerMount(c Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageBtrfs) ContainerUmount(c Instance, path string) (bool, error) {
+func (s *storageBtrfs) ContainerUmount(c instance.Instance, path string) (bool, error) {
 	return true, nil
 }
 
-func (s *storageBtrfs) ContainerRename(container Instance, newName string) error {
+func (s *storageBtrfs) ContainerRename(container instance.Instance, newName string) error {
 	logger.Debugf("Renaming BTRFS storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
 
 	// The storage pool must be mounted.
@@ -1274,7 +1276,7 @@ func (s *storageBtrfs) ContainerRename(container Instance, newName string) error
 	return nil
 }
 
-func (s *storageBtrfs) ContainerRestore(container Instance, sourceContainer Instance) error {
+func (s *storageBtrfs) ContainerRestore(container instance.Instance, sourceContainer instance.Instance) error {
 	logger.Debugf("Restoring BTRFS storage volume for container \"%s\" from %s to %s", s.volume.Name, sourceContainer.Name(), container.Name())
 
 	// The storage pool must be mounted.
@@ -1359,7 +1361,7 @@ func (s *storageBtrfs) ContainerRestore(container Instance, sourceContainer Inst
 	return failure
 }
 
-func (s *storageBtrfs) ContainerGetUsage(container Instance) (int64, error) {
+func (s *storageBtrfs) ContainerGetUsage(container instance.Instance) (int64, error) {
 	return s.btrfsPoolVolumeQGroupUsage(container.Path())
 }
 
@@ -1412,7 +1414,7 @@ func (s *storageBtrfs) doContainerSnapshotCreate(projectName string, targetName
 	return nil
 }
 
-func (s *storageBtrfs) ContainerSnapshotCreate(snapshotContainer Instance, sourceContainer Instance) error {
+func (s *storageBtrfs) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
 	err := s.doContainerSnapshotCreate(sourceContainer.Project(), snapshotContainer.Name(), sourceContainer.Name())
 	if err != nil {
 		s.ContainerSnapshotDelete(snapshotContainer)
@@ -1428,7 +1430,7 @@ func btrfsSnapshotDeleteInternal(projectName, poolName string, snapshotName stri
 	roSnapshotSubvolumeName := fmt.Sprintf("%s.ro", snapshotSubvolumeName)
 	names := []string{snapshotSubvolumeName, roSnapshotSubvolumeName}
 	for _, name := range names {
-		if shared.PathExists(name) && isBtrfsSubVolume(name) {
+		if shared.PathExists(name) && driver.IsBtrfsSubVolume(name) {
 			err := btrfsSubVolumesDelete(name)
 			if err != nil {
 				return err
@@ -1451,7 +1453,7 @@ func btrfsSnapshotDeleteInternal(projectName, poolName string, snapshotName stri
 	return nil
 }
 
-func (s *storageBtrfs) ContainerSnapshotDelete(snapshotContainer Instance) error {
+func (s *storageBtrfs) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
 	logger.Debugf("Deleting BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	_, err := s.StoragePoolMount()
@@ -1468,7 +1470,7 @@ func (s *storageBtrfs) ContainerSnapshotDelete(snapshotContainer Instance) error
 	return nil
 }
 
-func (s *storageBtrfs) ContainerSnapshotStart(container Instance) (bool, error) {
+func (s *storageBtrfs) ContainerSnapshotStart(container instance.Instance) (bool, error) {
 	logger.Debugf("Initializing BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	_, err := s.StoragePoolMount()
@@ -1497,7 +1499,7 @@ func (s *storageBtrfs) ContainerSnapshotStart(container Instance) (bool, error)
 	return true, nil
 }
 
-func (s *storageBtrfs) ContainerSnapshotStop(container Instance) (bool, error) {
+func (s *storageBtrfs) ContainerSnapshotStop(container instance.Instance) (bool, error) {
 	logger.Debugf("Stopping BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	_, err := s.StoragePoolMount()
@@ -1512,7 +1514,7 @@ func (s *storageBtrfs) ContainerSnapshotStop(container Instance) (bool, error) {
 		return false, nil
 	}
 
-	if shared.PathExists(snapshotSubvolumeName) && isBtrfsSubVolume(snapshotSubvolumeName) {
+	if shared.PathExists(snapshotSubvolumeName) && driver.IsBtrfsSubVolume(snapshotSubvolumeName) {
 		err = btrfsSubVolumesDelete(snapshotSubvolumeName)
 		if err != nil {
 			return false, err
@@ -1529,7 +1531,7 @@ func (s *storageBtrfs) ContainerSnapshotStop(container Instance) (bool, error) {
 }
 
 // ContainerSnapshotRename renames a snapshot of a container.
-func (s *storageBtrfs) ContainerSnapshotRename(snapshotContainer Instance, newName string) error {
+func (s *storageBtrfs) ContainerSnapshotRename(snapshotContainer instance.Instance, newName string) error {
 	logger.Debugf("Renaming BTRFS storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
 
 	// The storage pool must be mounted.
@@ -1553,7 +1555,7 @@ func (s *storageBtrfs) ContainerSnapshotRename(snapshotContainer Instance, newNa
 
 // Needed for live migration where an empty snapshot needs to be created before
 // rsyncing into it.
-func (s *storageBtrfs) ContainerSnapshotCreateEmpty(snapshotContainer Instance) error {
+func (s *storageBtrfs) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
 	logger.Debugf("Creating empty BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	// Mount the storage pool.
@@ -1615,10 +1617,10 @@ func (s *storageBtrfs) doBtrfsBackup(cur string, prev string, target string) err
 	return err
 }
 
-func (s *storageBtrfs) doContainerBackupCreateOptimized(tmpPath string, backup backup, source Instance) error {
+func (s *storageBtrfs) doContainerBackupCreateOptimized(tmpPath string, backup instance.Backup, source instance.Instance) error {
 	// Handle snapshots
 	finalParent := ""
-	if !backup.instanceOnly {
+	if !backup.InstanceOnly {
 		snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
 
 		// Retrieve the snapshots
@@ -1688,7 +1690,7 @@ func (s *storageBtrfs) doContainerBackupCreateOptimized(tmpPath string, backup b
 	return nil
 }
 
-func (s *storageBtrfs) doContainerBackupCreateVanilla(tmpPath string, backup backup, source Instance) error {
+func (s *storageBtrfs) doContainerBackupCreateVanilla(tmpPath string, backup instance.Backup, source instance.Instance) error {
 	// Prepare for rsync
 	rsync := func(oldPath string, newPath string, bwlimit string) error {
 		output, err := rsyncLocalCopy(oldPath, newPath, bwlimit, true)
@@ -1702,7 +1704,7 @@ func (s *storageBtrfs) doContainerBackupCreateVanilla(tmpPath string, backup bac
 	bwlimit := s.pool.Config["rsync.bwlimit"]
 
 	// Handle snapshots
-	if !backup.instanceOnly {
+	if !backup.InstanceOnly {
 		snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
 
 		// Retrieve the snapshots
@@ -1771,7 +1773,7 @@ func (s *storageBtrfs) doContainerBackupCreateVanilla(tmpPath string, backup bac
 	return nil
 }
 
-func (s *storageBtrfs) ContainerBackupCreate(backup backup, source Instance) error {
+func (s *storageBtrfs) ContainerBackupCreate(backup instance.Backup, source instance.Instance) error {
 	// Start storage
 	ourStart, err := source.StorageStart()
 	if err != nil {
@@ -1789,7 +1791,7 @@ func (s *storageBtrfs) ContainerBackupCreate(backup backup, source Instance) err
 	defer os.RemoveAll(tmpPath)
 
 	// Generate the actual backup
-	if backup.optimizedStorage {
+	if backup.OptimizedStorage {
 		err = s.doContainerBackupCreateOptimized(tmpPath, backup, source)
 		if err != nil {
 			return err
@@ -1810,7 +1812,7 @@ func (s *storageBtrfs) ContainerBackupCreate(backup backup, source Instance) err
 	return nil
 }
 
-func (s *storageBtrfs) doContainerBackupLoadOptimized(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageBtrfs) doContainerBackupLoadOptimized(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	containerName, _, _ := shared.ContainerGetParentAndSnapshotName(info.Name)
 
 	containerMntPoint := driver.GetContainerMountPoint("default", s.pool.Name, "")
@@ -1908,7 +1910,7 @@ func (s *storageBtrfs) doContainerBackupLoadOptimized(info backupInfo, data io.R
 	return nil
 }
 
-func (s *storageBtrfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageBtrfs) doContainerBackupLoadVanilla(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	// create the main container
 	err := s.doContainerCreate(info.Project, info.Name, info.Privileged)
 	if err != nil {
@@ -1963,7 +1965,7 @@ func (s *storageBtrfs) doContainerBackupLoadVanilla(info backupInfo, data io.Rea
 	return nil
 }
 
-func (s *storageBtrfs) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageBtrfs) ContainerBackupLoad(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	logger.Debugf("Loading BTRFS storage volume for backup \"%s\" on storage pool \"%s\"", info.Name, s.pool.Name)
 
 	if info.HasBinaryFormat {
@@ -2025,7 +2027,7 @@ func (s *storageBtrfs) ImageCreate(fingerprint string, tracker *ioprogress.Progr
 
 	// Unpack the image in imageMntPoint.
 	imagePath := shared.VarPath("images", fingerprint)
-	err = unpackImage(imagePath, tmpImageSubvolumeName, storageTypeBtrfs, s.s.OS.RunningInUserNS, tracker)
+	err = unpackImage(imagePath, tmpImageSubvolumeName, instance.StorageTypeBtrfs, s.s.OS.RunningInUserNS, tracker)
 	if err != nil {
 		return err
 	}
@@ -2066,7 +2068,7 @@ func (s *storageBtrfs) ImageDelete(fingerprint string) error {
 	// Delete the btrfs subvolume. The path with which we
 	// do this is ${LXD_DIR}/storage-pools/<pool>/images/<fingerprint>.
 	imageSubvolumeName := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if shared.PathExists(imageSubvolumeName) && isBtrfsSubVolume(imageSubvolumeName) {
+	if shared.PathExists(imageSubvolumeName) && driver.IsBtrfsSubVolume(imageSubvolumeName) {
 		err = btrfsSubVolumesDelete(imageSubvolumeName)
 		if err != nil {
 			return err
@@ -2231,7 +2233,7 @@ func btrfsSubVolumeDelete(subvol string) error {
 // subvolume itself.
 func btrfsSubVolumesDelete(subvol string) error {
 	// Delete subsubvols.
-	subsubvols, err := btrfsSubVolumesGet(subvol)
+	subsubvols, err := driver.BTRFSSubVolumesGet(subvol)
 	if err != nil {
 		return err
 	}
@@ -2296,7 +2298,7 @@ func (s *storageBtrfs) btrfsPoolVolumesSnapshot(source string, dest string, read
 	// Now snapshot all subvolumes of the root.
 	if recursive {
 		// Get a list of subvolumes of the root
-		subsubvols, err := btrfsSubVolumesGet(source)
+		subsubvols, err := driver.BTRFSSubVolumesGet(source)
 		if err != nil {
 			return err
 		}
@@ -2375,7 +2377,7 @@ func (s *storageBtrfs) PreservesInodes() bool {
 	return true
 }
 
-func (s *storageBtrfs) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageBtrfs) MigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	if s.s.OS.RunningInUserNS {
 		return rsyncMigrationSource(args)
 	}
@@ -2385,7 +2387,7 @@ func (s *storageBtrfs) MigrationSource(args MigrationSourceArgs) (MigrationStora
 	 * xfer costs. Then, after all that, we send the container itself.
 	 */
 	var err error
-	var snapshots = []Instance{}
+	var snapshots = []instance.Instance{}
 	if !args.InstanceOnly {
 		snapshots, err = args.Instance.Snapshots()
 		if err != nil {
@@ -2410,7 +2412,7 @@ func (s *storageBtrfs) MigrationSource(args MigrationSourceArgs) (MigrationStora
 	return sourceDriver, nil
 }
 
-func (s *storageBtrfs) MigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageBtrfs) MigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	if s.s.OS.RunningInUserNS {
 		return rsyncMigrationSink(conn, op, args)
 	}
@@ -2754,7 +2756,7 @@ func (s *storageBtrfs) StoragePoolVolumeCopy(source *api.StorageVolumeSource) er
 		return nil
 	}
 
-	subvols, err := btrfsSubVolumesGet(s.getCustomSnapshotSubvolumePath(source.Pool))
+	subvols, err := driver.BTRFSSubVolumesGet(s.getCustomSnapshotSubvolumePath(source.Pool))
 	if err != nil {
 		return err
 	}
@@ -2886,11 +2888,11 @@ func (s *storageBtrfs) doCrossPoolVolumeCopy(sourcePool string, sourceName strin
 	return nil
 }
 
-func (s *storageBtrfs) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageBtrfs) StorageMigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncStorageMigrationSource(args)
 }
 
-func (s *storageBtrfs) StorageMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageBtrfs) StorageMigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return rsyncStorageMigrationSink(conn, op, args)
 }
 
@@ -2942,7 +2944,7 @@ func (s *storageBtrfs) StoragePoolVolumeSnapshotDelete() error {
 	}
 
 	snapshotSubvolumeName := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	if shared.PathExists(snapshotSubvolumeName) && isBtrfsSubVolume(snapshotSubvolumeName) {
+	if shared.PathExists(snapshotSubvolumeName) && driver.IsBtrfsSubVolume(snapshotSubvolumeName) {
 		err := btrfsSubVolumesDelete(snapshotSubvolumeName)
 		if err != nil {
 			return err
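
For context on the driver.IsBtrfsSubVolume calls above: a minimal sketch of what the exported helper plausibly looks like after the move into the storage package, relying on the btrfs convention that a subvolume root always carries inode number 256 (BTRFS_FIRST_FREE_OBJECTID); the real implementation in lxd/storage may differ in detail:

package driver

import (
	"golang.org/x/sys/unix"
)

// IsBtrfsSubVolume returns true if the given path is the root of a btrfs
// subvolume. On btrfs a subvolume root always has inode number 256, so the
// check reduces to a single lstat.
func IsBtrfsSubVolume(subvolPath string) bool {
	fs := unix.Stat_t{}
	err := unix.Lstat(subvolPath, &fs)
	if err != nil {
		return false
	}

	// Anything that is not inode 256 is a plain file or directory.
	if fs.Ino != 256 {
		return false
	}

	return true
}

Because the check is a single lstat, the call sites above can cheaply guard btrfsSubVolumesDelete with it.
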
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 0abd661940..6a46b8709f 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -15,7 +15,9 @@ import (
 	"golang.org/x/sys/unix"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/project"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
@@ -36,8 +38,8 @@ type storageCeph struct {
 var cephVersion = ""
 
 func (s *storageCeph) StorageCoreInit() error {
-	s.sType = storageTypeCeph
-	typeName, err := storageTypeToString(s.sType)
+	s.sType = instance.StorageTypeCeph
+	typeName, err := instance.StorageTypeToString(s.sType)
 	if err != nil {
 		return err
 	}
@@ -758,7 +760,7 @@ func (s *storageCeph) StoragePoolUpdate(writable *api.StoragePoolPut, changedCon
 	return nil
 }
 
-func (s *storageCeph) ContainerStorageReady(container Instance) bool {
+func (s *storageCeph) ContainerStorageReady(container instance.Instance) bool {
 	name := container.Name()
 	logger.Debugf(`Checking if RBD storage volume for container "%s" on storage pool "%s" is ready`, name, s.pool.Name)
 
@@ -773,7 +775,7 @@ func (s *storageCeph) ContainerStorageReady(container Instance) bool {
 	return true
 }
 
-func (s *storageCeph) ContainerCreate(container Instance) error {
+func (s *storageCeph) ContainerCreate(container instance.Instance) error {
 	containerName := container.Name()
 	err := s.doContainerCreate(container.Project(), containerName, container.IsPrivileged())
 	if err != nil {
@@ -792,7 +794,7 @@ func (s *storageCeph) ContainerCreate(container Instance) error {
 	return nil
 }
 
-func (s *storageCeph) ContainerCreateFromImage(container Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
+func (s *storageCeph) ContainerCreateFromImage(container instance.Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
 	logger.Debugf(`Creating RBD storage volume for container "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
 
 	revert := true
@@ -944,7 +946,7 @@ func (s *storageCeph) ContainerCreateFromImage(container Instance, fingerprint s
 	return nil
 }
 
-func (s *storageCeph) ContainerDelete(container Instance) error {
+func (s *storageCeph) ContainerDelete(container instance.Instance) error {
 	containerName := container.Name()
 	logger.Debugf(`Deleting RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
 
@@ -993,7 +995,7 @@ func (s *storageCeph) ContainerDelete(container Instance) error {
 // - for each snapshot dump the contents into the empty storage volume and
 //   after each dump take a snapshot of the rbd storage volume
 // - dump the container contents into the rbd storage volume.
-func (s *storageCeph) doCrossPoolContainerCopy(target Instance, source Instance, containerOnly bool, refresh bool, refreshSnapshots []Instance) error {
+func (s *storageCeph) doCrossPoolContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
 	sourcePool, err := source.StoragePool()
 	if err != nil {
 		return err
@@ -1018,7 +1020,7 @@ func (s *storageCeph) doCrossPoolContainerCopy(target Instance, source Instance,
 		return err
 	}
 
-	var snapshots []Instance
+	var snapshots []instance.Instance
 
 	if refresh {
 		snapshots = refreshSnapshots
@@ -1088,7 +1090,7 @@ func (s *storageCeph) doCrossPoolContainerCopy(target Instance, source Instance,
 	return nil
 }
 
-func (s *storageCeph) ContainerCopy(target Instance, source Instance, containerOnly bool) error {
+func (s *storageCeph) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
 	sourceContainerName := source.Name()
 	logger.Debugf(`Copying RBD container storage %s to %s`, sourceContainerName, target.Name())
 
@@ -1321,13 +1323,13 @@ func (s *storageCeph) ContainerCopy(target Instance, source Instance, containerO
 	return nil
 }
 
-func (s *storageCeph) ContainerRefresh(target Instance, source Instance, snapshots []Instance) error {
+func (s *storageCeph) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
 	logger.Debugf(`Refreshing RBD container storage for %s from %s`, target.Name(), source.Name())
 
 	return s.doCrossPoolContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
 }
 
-func (s *storageCeph) ContainerMount(c Instance) (bool, error) {
+func (s *storageCeph) ContainerMount(c instance.Instance) (bool, error) {
 	logger.Debugf("Mounting RBD storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	ourMount, err := s.doContainerMount(c.Project(), c.Name())
@@ -1339,7 +1341,7 @@ func (s *storageCeph) ContainerMount(c Instance) (bool, error) {
 	return ourMount, nil
 }
 
-func (s *storageCeph) ContainerUmount(c Instance, path string) (bool, error) {
+func (s *storageCeph) ContainerUmount(c instance.Instance, path string) (bool, error) {
 	logger.Debugf("Unmounting RBD storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 	name := c.Name()
 
@@ -1388,7 +1390,7 @@ func (s *storageCeph) ContainerUmount(c Instance, path string) (bool, error) {
 	return ourUmount, nil
 }
 
-func (s *storageCeph) ContainerRename(c Instance, newName string) error {
+func (s *storageCeph) ContainerRename(c instance.Instance, newName string) error {
 	oldName := c.Name()
 	containerPath := c.Path()
 
@@ -1540,7 +1542,7 @@ func (s *storageCeph) ContainerRename(c Instance, newName string) error {
 	return nil
 }
 
-func (s *storageCeph) ContainerRestore(target Instance, source Instance) error {
+func (s *storageCeph) ContainerRestore(target instance.Instance, source instance.Instance) error {
 	sourceName := source.Name()
 	targetName := target.Name()
 
@@ -1582,11 +1584,11 @@ func (s *storageCeph) ContainerRestore(target Instance, source Instance) error {
 	return nil
 }
 
-func (s *storageCeph) ContainerGetUsage(container Instance) (int64, error) {
+func (s *storageCeph) ContainerGetUsage(container instance.Instance) (int64, error) {
 	return -1, fmt.Errorf("RBD quotas are currently not supported")
 }
 
-func (s *storageCeph) ContainerSnapshotCreate(snapshotContainer Instance, sourceContainer Instance) error {
+func (s *storageCeph) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
 	containerMntPoint := driver.GetContainerMountPoint(sourceContainer.Project(), s.pool.Name, sourceContainer.Name())
 	if shared.IsMountPoint(containerMntPoint) {
 		// This is costly but we need to ensure that all cached data has
@@ -1605,7 +1607,7 @@ func (s *storageCeph) ContainerSnapshotCreate(snapshotContainer Instance, source
 	return s.doContainerSnapshotCreate(sourceContainer.Project(), snapshotContainer.Name(), sourceContainer.Name())
 }
 
-func (s *storageCeph) ContainerSnapshotDelete(snapshotContainer Instance) error {
+func (s *storageCeph) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
 	logger.Debugf(`Deleting RBD storage volume for snapshot "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
 
 	snapshotContainerName := snapshotContainer.Name()
@@ -1671,7 +1673,7 @@ func (s *storageCeph) ContainerSnapshotDelete(snapshotContainer Instance) error
 	return nil
 }
 
-func (s *storageCeph) ContainerSnapshotRename(c Instance, newName string) error {
+func (s *storageCeph) ContainerSnapshotRename(c instance.Instance, newName string) error {
 	oldName := c.Name()
 	logger.Debugf(`Renaming RBD storage volume for snapshot "%s" from "%s" to "%s"`, oldName, oldName, newName)
 
@@ -1719,7 +1721,7 @@ func (s *storageCeph) ContainerSnapshotRename(c Instance, newName string) error
 	return nil
 }
 
-func (s *storageCeph) ContainerSnapshotStart(c Instance) (bool, error) {
+func (s *storageCeph) ContainerSnapshotStart(c instance.Instance) (bool, error) {
 	containerName := c.Name()
 	logger.Debugf(`Initializing RBD storage volume for snapshot "%s" on storage pool "%s"`, containerName, s.pool.Name)
 
@@ -1835,7 +1837,7 @@ func (s *storageCeph) ContainerSnapshotStart(c Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageCeph) ContainerSnapshotStop(c Instance) (bool, error) {
+func (s *storageCeph) ContainerSnapshotStop(c instance.Instance) (bool, error) {
 	logger.Debugf(`Stopping RBD storage volume for snapshot "%s" on storage pool "%s"`, c.Name(), s.pool.Name)
 
 	containerName := c.Name()
@@ -1882,14 +1884,14 @@ func (s *storageCeph) ContainerSnapshotStop(c Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageCeph) ContainerSnapshotCreateEmpty(c Instance) error {
+func (s *storageCeph) ContainerSnapshotCreateEmpty(c instance.Instance) error {
 	logger.Debugf(`Creating empty RBD storage volume for snapshot "%s" on storage pool "%s" (noop)`, c.Name(), s.pool.Name)
 
 	logger.Debugf(`Created empty RBD storage volume for snapshot "%s" on storage pool "%s" (noop)`, c.Name(), s.pool.Name)
 	return nil
 }
 
-func (s *storageCeph) ContainerBackupCreate(backup backup, source Instance) error {
+func (s *storageCeph) ContainerBackupCreate(backup instance.Backup, source instance.Instance) error {
 	// Start storage
 	ourStart, err := source.StorageStart()
 	if err != nil {
@@ -1907,7 +1909,7 @@ func (s *storageCeph) ContainerBackupCreate(backup backup, source Instance) erro
 	defer os.RemoveAll(tmpPath)
 
 	// Generate the actual backup
-	if !backup.instanceOnly {
+	if !backup.InstanceOnly {
 		snapshots, err := source.Snapshots()
 		if err != nil {
 			return err
@@ -1941,7 +1943,7 @@ func (s *storageCeph) ContainerBackupCreate(backup backup, source Instance) erro
 // - for each snapshot dump the contents into the empty storage volume and
 //   after each dump take a snapshot of the rbd storage volume
 // - dump the container contents into the rbd storage volume.
-func (s *storageCeph) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageCeph) ContainerBackupLoad(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	// create the main container
 	err := s.doContainerCreate(info.Project, info.Name, info.Privileged)
 	if err != nil {
@@ -2121,7 +2123,7 @@ func (s *storageCeph) ImageCreate(fingerprint string, tracker *ioprogress.Progre
 
 		// rsync contents into image
 		imagePath := shared.VarPath("images", fingerprint)
-		err = unpackImage(imagePath, imageMntPoint, storageTypeCeph, s.s.OS.RunningInUserNS, nil)
+		err = unpackImage(imagePath, imageMntPoint, instance.StorageTypeCeph, s.s.OS.RunningInUserNS, nil)
 		if err != nil {
 			logger.Errorf(`Failed to unpack image for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
 
@@ -2685,11 +2687,11 @@ func (s *storageCeph) StoragePoolVolumeCopy(source *api.StorageVolumeSource) err
 	return nil
 }
 
-func (s *storageCeph) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageCeph) StorageMigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncStorageMigrationSource(args)
 }
 
-func (s *storageCeph) StorageMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageCeph) StorageMigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return rsyncStorageMigrationSink(conn, op, args)
 }
 
@@ -2814,7 +2816,7 @@ func (s *storageCeph) PreservesInodes() bool {
 	return false
 }
 
-func (s *storageCeph) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageCeph) MigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	// If the container is a snapshot, let's just send that. We don't need
 	// to send anything else, because that's all the user asked for.
 	if args.Instance.IsSnapshot() {
@@ -2826,7 +2828,7 @@ func (s *storageCeph) MigrationSource(args MigrationSourceArgs) (MigrationStorag
 
 	driver := rbdMigrationSourceDriver{
 		container:        args.Instance,
-		snapshots:        []Instance{},
+		snapshots:        []instance.Instance{},
 		rbdSnapshotNames: []string{},
 		ceph:             s,
 	}
@@ -2861,7 +2863,7 @@ func (s *storageCeph) MigrationSource(args MigrationSourceArgs) (MigrationStorag
 		}
 
 		lxdName := fmt.Sprintf("%s%s%s", instanceName, shared.SnapshotDelimiter, snap[len("snapshot_"):])
-		snapshot, err := instanceLoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
+		snapshot, err := instance.InstanceLoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
 		if err != nil {
 			logger.Errorf(`Failed to load snapshot "%s" for RBD storage volume "%s" on storage pool "%s": %s`, lxdName, instanceName, s.pool.Name, err)
 			return nil, err
@@ -2874,7 +2876,7 @@ func (s *storageCeph) MigrationSource(args MigrationSourceArgs) (MigrationStorag
 	return &driver, nil
 }
 
-func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	// Check that we received a valid root disk device with a pool property
 	// set.
 	parentStoragePool := ""
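
The backup.instanceOnly -> backup.InstanceOnly and optimizedStorage -> OptimizedStorage renames in these hunks follow directly from Go's visibility rules: once the backup type moves into the instance package, any field the main package still reads must be exported. A hedged sketch of the shape implied by the call sites above (the field set is inferred from usage only; the real type surely carries more):

package instance

// Backup is a sketch of the moved type; only the fields referenced by the
// storage drivers in this patch are shown.
type Backup struct {
	// InstanceOnly excludes snapshots from the backup when true.
	InstanceOnly bool

	// OptimizedStorage selects the driver-specific binary format
	// (e.g. btrfs send) instead of a plain rsync dump.
	OptimizedStorage bool
}
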
diff --git a/lxd/storage_ceph_utils.go b/lxd/storage_ceph_utils.go
index 4d5c20222e..4434ffeccd 100644
--- a/lxd/storage_ceph_utils.go
+++ b/lxd/storage_ceph_utils.go
@@ -15,6 +15,7 @@ import (
 	"golang.org/x/sys/unix"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/project"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
@@ -727,7 +728,7 @@ func (s *storageCeph) getRBDMountOptions() string {
 // copyWithoutSnapshotsFull creates a non-sparse copy of a container
 // This does not introduce a dependency relation between the source RBD storage
 // volume and the target RBD storage volume.
-func (s *storageCeph) copyWithoutSnapshotsFull(target Instance, source Instance) error {
+func (s *storageCeph) copyWithoutSnapshotsFull(target instance.Instance, source instance.Instance) error {
 	logger.Debugf(`Creating non-sparse copy of RBD storage volume for container "%s" to "%s" without snapshots`, source.Name(), target.Name())
 
 	sourceIsSnapshot := source.IsSnapshot()
@@ -795,7 +796,7 @@ func (s *storageCeph) copyWithoutSnapshotsFull(target Instance, source Instance)
 // copyWithoutSnapshotsSparse creates a sparse copy of a container
 // This introduces a dependency relation between the source RBD storage volume
 // and the target RBD storage volume.
-func (s *storageCeph) copyWithoutSnapshotsSparse(target Instance, source Instance) error {
+func (s *storageCeph) copyWithoutSnapshotsSparse(target instance.Instance, source instance.Instance) error {
 	logger.Debugf(`Creating sparse copy of RBD storage volume for container "%s" to "%s" without snapshots`, source.Name(),
 		target.Name())
 
@@ -1584,7 +1585,7 @@ func (s *storageCeph) cephRBDVolumeDumpToFile(sourceVolumeName string, file stri
 }
 
 // cephRBDVolumeBackupCreate creates a backup of a container or snapshot.
-func (s *storageCeph) cephRBDVolumeBackupCreate(tmpPath string, backup backup, source Instance) error {
+func (s *storageCeph) cephRBDVolumeBackupCreate(tmpPath string, backup instance.Backup, source instance.Instance) error {
 	sourceIsSnapshot := source.IsSnapshot()
 	sourceContainerName := source.Name()
 	sourceContainerOnlyName := project.Prefix(source.Project(), sourceContainerName)
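
storage_ceph_utils.go adopts the same import pattern as the other files: the storage package is imported under the driver alias so its exported helpers read naturally next to the new instance.Instance parameters. A minimal sketch of the combination; mountPointFor is a hypothetical helper invented purely for illustration:

package main

import (
	"github.com/lxc/lxd/lxd/instance"
	driver "github.com/lxc/lxd/lxd/storage"
)

// mountPointFor shows how the aliased storage import and the
// instance.Instance parameter type combine after the refactor.
func mountPointFor(c instance.Instance, poolName string) string {
	return driver.GetContainerMountPoint(c.Project(), poolName, c.Name())
}
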
diff --git a/lxd/storage_cephfs.go b/lxd/storage_cephfs.go
index 0c3f8b9aa3..3c1aa28ef4 100644
--- a/lxd/storage_cephfs.go
+++ b/lxd/storage_cephfs.go
@@ -13,7 +13,9 @@ import (
 	"github.com/gorilla/websocket"
 	"github.com/pkg/errors"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/state"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
@@ -31,8 +33,8 @@ type storageCephFs struct {
 }
 
 func (s *storageCephFs) StorageCoreInit() error {
-	s.sType = storageTypeCeph
-	typeName, err := storageTypeToString(s.sType)
+	s.sType = instance.StorageTypeCeph
+	typeName, err := instance.StorageTypeToString(s.sType)
 	if err != nil {
 		return err
 	}
@@ -618,85 +620,85 @@ func (s *storageCephFs) StoragePoolVolumeRename(newName string) error {
 	return nil
 }
 
-func (s *storageCephFs) ContainerStorageReady(container Instance) bool {
+func (s *storageCephFs) ContainerStorageReady(container instance.Instance) bool {
 	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
 	ok, _ := shared.PathIsEmpty(containerMntPoint)
 	return !ok
 }
 
-func (s *storageCephFs) ContainerCreate(container Instance) error {
+func (s *storageCephFs) ContainerCreate(container instance.Instance) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerCreateFromImage(container Instance, imageFingerprint string, tracker *ioprogress.ProgressTracker) error {
+func (s *storageCephFs) ContainerCreateFromImage(container instance.Instance, imageFingerprint string, tracker *ioprogress.ProgressTracker) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerCanRestore(container Instance, sourceContainer Instance) error {
+func (s *storageCephFs) ContainerCanRestore(container instance.Instance, sourceContainer instance.Instance) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerDelete(container Instance) error {
+func (s *storageCephFs) ContainerDelete(container instance.Instance) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerCopy(target Instance, source Instance, containerOnly bool) error {
+func (s *storageCephFs) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerRefresh(target Instance, source Instance, snapshots []Instance) error {
+func (s *storageCephFs) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerMount(c Instance) (bool, error) {
+func (s *storageCephFs) ContainerMount(c instance.Instance) (bool, error) {
 	return false, fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerUmount(c Instance, path string) (bool, error) {
+func (s *storageCephFs) ContainerUmount(c instance.Instance, path string) (bool, error) {
 	return false, fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerRename(container Instance, newName string) error {
+func (s *storageCephFs) ContainerRename(container instance.Instance, newName string) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerRestore(container Instance, sourceContainer Instance) error {
+func (s *storageCephFs) ContainerRestore(container instance.Instance, sourceContainer instance.Instance) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerGetUsage(c Instance) (int64, error) {
+func (s *storageCephFs) ContainerGetUsage(c instance.Instance) (int64, error) {
 	return -1, fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerSnapshotCreate(snapshotContainer Instance, sourceContainer Instance) error {
+func (s *storageCephFs) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerSnapshotCreateEmpty(snapshotContainer Instance) error {
+func (s *storageCephFs) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerSnapshotDelete(snapshotContainer Instance) error {
+func (s *storageCephFs) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerSnapshotRename(snapshotContainer Instance, newName string) error {
+func (s *storageCephFs) ContainerSnapshotRename(snapshotContainer instance.Instance, newName string) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerSnapshotStart(container Instance) (bool, error) {
+func (s *storageCephFs) ContainerSnapshotStart(container instance.Instance) (bool, error) {
 	return false, fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerSnapshotStop(container Instance) (bool, error) {
+func (s *storageCephFs) ContainerSnapshotStop(container instance.Instance) (bool, error) {
 	return false, fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerBackupCreate(backup backup, source Instance) error {
+func (s *storageCephFs) ContainerBackupCreate(backup instance.Backup, source instance.Instance) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
-func (s *storageCephFs) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageCephFs) ContainerBackupLoad(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	return fmt.Errorf("CEPHFS cannot be used for containers")
 }
 
@@ -724,11 +726,11 @@ func (s *storageCephFs) PreservesInodes() bool {
 	return false
 }
 
-func (s *storageCephFs) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageCephFs) MigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncMigrationSource(args)
 }
 
-func (s *storageCephFs) MigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageCephFs) MigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return rsyncMigrationSink(conn, op, args)
 }
 
@@ -820,11 +822,11 @@ func (s *storageCephFs) StoragePoolVolumeCopy(source *api.StorageVolumeSource) e
 	return nil
 }
 
-func (s *storageCephFs) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageCephFs) StorageMigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncStorageMigrationSource(args)
 }
 
-func (s *storageCephFs) StorageMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageCephFs) StorageMigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return rsyncStorageMigrationSink(conn, op, args)
 }
 
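Since storageCephFs stubs every container method with an error, the only thing this sweep must preserve for it is interface conformance. A compile-time assertion in the main package is the idiomatic guard against a missed signature; note the interface name below is an assumption, not taken from the patch:

// Compile-time check that the stub driver still satisfies the main
// package's storage interface after the instance.Instance signature
// changes; "storage" is the assumed interface name.
var _ storage = (*storageCephFs)(nil)

If any method signature drifts from the interface, this line fails to compile, which is exactly the feedback wanted during a mechanical refactor like this one.
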
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index ff5973a8f7..3af5c0e45d 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -12,7 +12,9 @@ import (
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/project"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/lxd/storage/quota"
@@ -31,8 +33,8 @@ type storageDir struct {
 
 // Only initialize the minimal information we need about a given storage type.
 func (s *storageDir) StorageCoreInit() error {
-	s.sType = storageTypeDir
-	typeName, err := storageTypeToString(s.sType)
+	s.sType = instance.StorageTypeDir
+	typeName, err := instance.StorageTypeToString(s.sType)
 	if err != nil {
 		return err
 	}
@@ -488,13 +490,13 @@ func (s *storageDir) StoragePoolVolumeRename(newName string) error {
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
-func (s *storageDir) ContainerStorageReady(container Instance) bool {
+func (s *storageDir) ContainerStorageReady(container instance.Instance) bool {
 	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
 	ok, _ := shared.PathIsEmpty(containerMntPoint)
 	return !ok
 }
 
-func (s *storageDir) ContainerCreate(container Instance) error {
+func (s *storageDir) ContainerCreate(container instance.Instance) error {
 	logger.Debugf("Creating empty DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	_, err := s.StoragePoolMount()
@@ -536,7 +538,7 @@ func (s *storageDir) ContainerCreate(container Instance) error {
 	return nil
 }
 
-func (s *storageDir) ContainerCreateFromImage(container Instance, imageFingerprint string, tracker *ioprogress.ProgressTracker) error {
+func (s *storageDir) ContainerCreateFromImage(container instance.Instance, imageFingerprint string, tracker *ioprogress.ProgressTracker) error {
 	logger.Debugf("Creating DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	_, err := s.StoragePoolMount()
@@ -570,7 +572,7 @@ func (s *storageDir) ContainerCreateFromImage(container Instance, imageFingerpri
 	}
 
 	imagePath := shared.VarPath("images", imageFingerprint)
-	err = unpackImage(imagePath, containerMntPoint, storageTypeDir, s.s.OS.RunningInUserNS, nil)
+	err = unpackImage(imagePath, containerMntPoint, instance.StorageTypeDir, s.s.OS.RunningInUserNS, nil)
 	if err != nil {
 		return errors.Wrap(err, "Unpack image")
 	}
@@ -586,7 +588,7 @@ func (s *storageDir) ContainerCreateFromImage(container Instance, imageFingerpri
 	return nil
 }
 
-func (s *storageDir) ContainerDelete(container Instance) error {
+func (s *storageDir) ContainerDelete(container instance.Instance) error {
 	logger.Debugf("Deleting DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	source := s.pool.Config["source"]
@@ -648,7 +650,7 @@ func (s *storageDir) ContainerDelete(container Instance) error {
 	return nil
 }
 
-func (s *storageDir) copyContainer(target Instance, source Instance) error {
+func (s *storageDir) copyContainer(target instance.Instance, source instance.Instance) error {
 	_, sourcePool, _ := source.Storage().GetContainerPoolInfo()
 	_, targetPool, _ := target.Storage().GetContainerPoolInfo()
 	sourceContainerMntPoint := driver.GetContainerMountPoint(source.Project(), sourcePool, source.Name())
@@ -681,7 +683,7 @@ func (s *storageDir) copyContainer(target Instance, source Instance) error {
 	return nil
 }
 
-func (s *storageDir) copySnapshot(target Instance, targetPool string, source Instance, sourcePool string) error {
+func (s *storageDir) copySnapshot(target instance.Instance, targetPool string, source instance.Instance, sourcePool string) error {
 	sourceName := source.Name()
 	targetName := target.Name()
 	sourceContainerMntPoint := driver.GetSnapshotMountPoint(source.Project(), sourcePool, sourceName)
@@ -705,7 +707,7 @@ func (s *storageDir) copySnapshot(target Instance, targetPool string, source Ins
 	return nil
 }
 
-func (s *storageDir) ContainerCopy(target Instance, source Instance, containerOnly bool) error {
+func (s *storageDir) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
 	logger.Debugf("Copying DIR container storage %s to %s", source.Name(), target.Name())
 
 	err := s.doContainerCopy(target, source, containerOnly, false, nil)
@@ -717,7 +719,7 @@ func (s *storageDir) ContainerCopy(target Instance, source Instance, containerOn
 	return nil
 }
 
-func (s *storageDir) doContainerCopy(target Instance, source Instance, containerOnly bool, refresh bool, refreshSnapshots []Instance) error {
+func (s *storageDir) doContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
 	_, err := s.StoragePoolMount()
 	if err != nil {
 		return err
@@ -767,7 +769,7 @@ func (s *storageDir) doContainerCopy(target Instance, source Instance, container
 		return nil
 	}
 
-	var snapshots []Instance
+	var snapshots []instance.Instance
 
 	if refresh {
 		snapshots = refreshSnapshots
@@ -783,14 +785,14 @@ func (s *storageDir) doContainerCopy(target Instance, source Instance, container
 	}
 
 	for _, snap := range snapshots {
-		sourceSnapshot, err := instanceLoadByProjectAndName(srcState, source.Project(), snap.Name())
+		sourceSnapshot, err := instance.InstanceLoadByProjectAndName(srcState, source.Project(), snap.Name())
 		if err != nil {
 			return err
 		}
 
 		_, snapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(snap.Name())
 		newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
-		targetSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), newSnapName)
+		targetSnapshot, err := instance.InstanceLoadByProjectAndName(s.s, source.Project(), newSnapName)
 		if err != nil {
 			return err
 		}
@@ -804,7 +806,7 @@ func (s *storageDir) doContainerCopy(target Instance, source Instance, container
 	return nil
 }
 
-func (s *storageDir) ContainerRefresh(target Instance, source Instance, snapshots []Instance) error {
+func (s *storageDir) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
 	logger.Debugf("Refreshing DIR container storage for %s from %s", target.Name(), source.Name())
 
 	err := s.doContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
@@ -816,15 +818,15 @@ func (s *storageDir) ContainerRefresh(target Instance, source Instance, snapshot
 	return nil
 }
 
-func (s *storageDir) ContainerMount(c Instance) (bool, error) {
+func (s *storageDir) ContainerMount(c instance.Instance) (bool, error) {
 	return s.StoragePoolMount()
 }
 
-func (s *storageDir) ContainerUmount(c Instance, path string) (bool, error) {
+func (s *storageDir) ContainerUmount(c instance.Instance, path string) (bool, error) {
 	return true, nil
 }
 
-func (s *storageDir) ContainerRename(container Instance, newName string) error {
+func (s *storageDir) ContainerRename(container instance.Instance, newName string) error {
 	logger.Debugf("Renaming DIR storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
 
 	_, err := s.StoragePoolMount()
@@ -879,7 +881,7 @@ func (s *storageDir) ContainerRename(container Instance, newName string) error {
 	return nil
 }
 
-func (s *storageDir) ContainerRestore(container Instance, sourceContainer Instance) error {
+func (s *storageDir) ContainerRestore(container instance.Instance, sourceContainer instance.Instance) error {
 	logger.Debugf("Restoring DIR storage volume for container \"%s\" from %s to %s", s.volume.Name, sourceContainer.Name(), container.Name())
 
 	_, err := s.StoragePoolMount()
@@ -901,7 +903,7 @@ func (s *storageDir) ContainerRestore(container Instance, sourceContainer Instan
 	return nil
 }
 
-func (s *storageDir) ContainerGetUsage(c Instance) (int64, error) {
+func (s *storageDir) ContainerGetUsage(c instance.Instance) (int64, error) {
 	path := driver.GetContainerMountPoint(c.Project(), s.pool.Name, c.Name())
 
 	ok, err := quota.Supported(path)
@@ -918,7 +920,7 @@ func (s *storageDir) ContainerGetUsage(c Instance) (int64, error) {
 	return size, nil
 }
 
-func (s *storageDir) ContainerSnapshotCreate(snapshotContainer Instance, sourceContainer Instance) error {
+func (s *storageDir) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
 	logger.Debugf("Creating DIR storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	_, err := s.StoragePoolMount()
@@ -934,7 +936,7 @@ func (s *storageDir) ContainerSnapshotCreate(snapshotContainer Instance, sourceC
 		return err
 	}
 
-	rsync := func(snapshotContainer Instance, oldPath string, newPath string, bwlimit string) error {
+	rsync := func(snapshotContainer instance.Instance, oldPath string, newPath string, bwlimit string) error {
 		output, err := rsyncLocalCopy(oldPath, newPath, bwlimit, true)
 		if err != nil {
 			s.ContainerDelete(snapshotContainer)
@@ -995,7 +997,7 @@ onSuccess:
 	return nil
 }
 
-func (s *storageDir) ContainerSnapshotCreateEmpty(snapshotContainer Instance) error {
+func (s *storageDir) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
 	logger.Debugf("Creating empty DIR storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	_, err := s.StoragePoolMount()
@@ -1069,7 +1071,7 @@ func dirSnapshotDeleteInternal(projectName, poolName string, snapshotName string
 	return nil
 }
 
-func (s *storageDir) ContainerSnapshotDelete(snapshotContainer Instance) error {
+func (s *storageDir) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
 	logger.Debugf("Deleting DIR storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	_, err := s.StoragePoolMount()
@@ -1092,7 +1094,7 @@ func (s *storageDir) ContainerSnapshotDelete(snapshotContainer Instance) error {
 	return nil
 }
 
-func (s *storageDir) ContainerSnapshotRename(snapshotContainer Instance, newName string) error {
+func (s *storageDir) ContainerSnapshotRename(snapshotContainer instance.Instance, newName string) error {
 	logger.Debugf("Renaming DIR storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
 
 	_, err := s.StoragePoolMount()
@@ -1113,15 +1115,15 @@ func (s *storageDir) ContainerSnapshotRename(snapshotContainer Instance, newName
 	return nil
 }
 
-func (s *storageDir) ContainerSnapshotStart(container Instance) (bool, error) {
+func (s *storageDir) ContainerSnapshotStart(container instance.Instance) (bool, error) {
 	return s.StoragePoolMount()
 }
 
-func (s *storageDir) ContainerSnapshotStop(container Instance) (bool, error) {
+func (s *storageDir) ContainerSnapshotStop(container instance.Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageDir) ContainerBackupCreate(backup backup, source Instance) error {
+func (s *storageDir) ContainerBackupCreate(backup instance.Backup, source instance.Instance) error {
 	// Start storage
 	ourStart, err := source.StorageStart()
 	if err != nil {
@@ -1151,7 +1153,7 @@ func (s *storageDir) ContainerBackupCreate(backup backup, source Instance) error
 	bwlimit := s.pool.Config["rsync.bwlimit"]
 
 	// Handle snapshots
-	if !backup.instanceOnly {
+	if !backup.InstanceOnly {
 		snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
 
 		// Retrieve the snapshots
@@ -1209,7 +1211,7 @@ func (s *storageDir) ContainerBackupCreate(backup backup, source Instance) error
 	return nil
 }
 
-func (s *storageDir) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageDir) ContainerBackupLoad(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	_, err := s.StoragePoolMount()
 	if err != nil {
 		return err
@@ -1302,11 +1304,11 @@ func (s *storageDir) PreservesInodes() bool {
 	return false
 }
 
-func (s *storageDir) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageDir) MigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncMigrationSource(args)
 }
 
-func (s *storageDir) MigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageDir) MigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return rsyncMigrationSink(conn, op, args)
 }
 
@@ -1438,11 +1440,11 @@ func (s *storageDir) StoragePoolVolumeCopy(source *api.StorageVolumeSource) erro
 	return nil
 }
 
-func (s *storageDir) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageDir) StorageMigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncStorageMigrationSource(args)
 }
 
-func (s *storageDir) StorageMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageDir) StorageMigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return rsyncStorageMigrationSink(conn, op, args)
 }
 
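The instance.StorageTypeDir / instance.StorageTypeToString pair used in the StorageCoreInit hunks suggests a small enum plus stringer now living in the instance package. A sketch assuming it mirrors the old main-package version; the constant set beyond those visible in this patch, their ordering, and their values are assumptions:

package instance

import "fmt"

// StorageType identifies a storage backend.
type StorageType int

const (
	StorageTypeBtrfs StorageType = iota
	StorageTypeCeph
	StorageTypeDir
	StorageTypeLvm
	StorageTypeZfs
)

// StorageTypeToString maps a StorageType to its user-visible name.
func StorageTypeToString(sType StorageType) (string, error) {
	switch sType {
	case StorageTypeBtrfs:
		return "btrfs", nil
	case StorageTypeCeph:
		return "ceph", nil
	case StorageTypeDir:
		return "dir", nil
	case StorageTypeLvm:
		return "lvm", nil
	case StorageTypeZfs:
		return "zfs", nil
	}

	return "", fmt.Errorf("invalid storage type %d", sType)
}
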
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index b3ef0b20eb..949b22822d 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -13,7 +13,9 @@ import (
 	"github.com/pborman/uuid"
 	"github.com/pkg/errors"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/project"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
@@ -35,8 +37,8 @@ var lvmVersion = ""
 
 // Only initialize the minimal information we need about a given storage type.
 func (s *storageLvm) StorageCoreInit() error {
-	s.sType = storageTypeLvm
-	typeName, err := storageTypeToString(s.sType)
+	s.sType = instance.StorageTypeLvm
+	typeName, err := instance.StorageTypeToString(s.sType)
 	if err != nil {
 		return err
 	}
@@ -916,7 +918,7 @@ func (s *storageLvm) StoragePoolVolumeRename(newName string) error {
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
-func (s *storageLvm) ContainerStorageReady(container Instance) bool {
+func (s *storageLvm) ContainerStorageReady(container instance.Instance) bool {
 	containerLvmName := containerNameToLVName(container.Name())
 	poolName := s.getOnDiskPoolName()
 	containerLvmPath := getLvmDevPath(container.Project(), poolName, storagePoolVolumeAPIEndpointContainers, containerLvmName)
@@ -924,7 +926,7 @@ func (s *storageLvm) ContainerStorageReady(container Instance) bool {
 	return ok
 }
 
-func (s *storageLvm) ContainerCreate(container Instance) error {
+func (s *storageLvm) ContainerCreate(container instance.Instance) error {
 	logger.Debugf("Creating empty LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	tryUndo := true
@@ -988,7 +990,7 @@ func (s *storageLvm) ContainerCreate(container Instance) error {
 	return nil
 }
 
-func (s *storageLvm) ContainerCreateFromImage(container Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
+func (s *storageLvm) ContainerCreateFromImage(container instance.Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
 	logger.Debugf("Creating LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	tryUndo := true
@@ -1107,7 +1109,7 @@ func lvmContainerDeleteInternal(projectName, poolName string, ctName string, isS
 	return nil
 }
 
-func (s *storageLvm) ContainerDelete(container Instance) error {
+func (s *storageLvm) ContainerDelete(container instance.Instance) error {
 	logger.Debugf("Deleting LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	containerName := container.Name()
@@ -1121,7 +1123,7 @@ func (s *storageLvm) ContainerDelete(container Instance) error {
 	return nil
 }
 
-func (s *storageLvm) ContainerCopy(target Instance, source Instance, containerOnly bool) error {
+func (s *storageLvm) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
 	logger.Debugf("Copying LVM container storage for container %s to %s", source.Name(), target.Name())
 
 	err := s.doContainerCopy(target, source, containerOnly, false, nil)
@@ -1133,7 +1135,7 @@ func (s *storageLvm) ContainerCopy(target Instance, source Instance, containerOn
 	return nil
 }
 
-func (s *storageLvm) doContainerCopy(target Instance, source Instance, containerOnly bool, refresh bool, refreshSnapshots []Instance) error {
+func (s *storageLvm) doContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
 	ourStart, err := source.StorageStart()
 	if err != nil {
 		return err
@@ -1177,7 +1179,7 @@ func (s *storageLvm) doContainerCopy(target Instance, source Instance, container
 		return nil
 	}
 
-	var snapshots []Instance
+	var snapshots []instance.Instance
 
 	if refresh {
 		snapshots = refreshSnapshots
@@ -1198,12 +1200,12 @@ func (s *storageLvm) doContainerCopy(target Instance, source Instance, container
 
 		logger.Debugf("Copying LVM container storage for snapshot %s to %s", snap.Name(), newSnapName)
 
-		sourceSnapshot, err := instanceLoadByProjectAndName(srcState, source.Project(), snap.Name())
+		sourceSnapshot, err := instance.InstanceLoadByProjectAndName(srcState, source.Project(), snap.Name())
 		if err != nil {
 			return err
 		}
 
-		targetSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), newSnapName)
+		targetSnapshot, err := instance.InstanceLoadByProjectAndName(s.s, source.Project(), newSnapName)
 		if err != nil {
 			return err
 		}
@@ -1219,7 +1221,7 @@ func (s *storageLvm) doContainerCopy(target Instance, source Instance, container
 	return nil
 }
 
-func (s *storageLvm) ContainerRefresh(target Instance, source Instance, snapshots []Instance) error {
+func (s *storageLvm) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
 	logger.Debugf("Refreshing LVM container storage for %s from %s", target.Name(), source.Name())
 
 	err := s.doContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
@@ -1231,7 +1233,7 @@ func (s *storageLvm) ContainerRefresh(target Instance, source Instance, snapshot
 	return nil
 }
 
-func (s *storageLvm) ContainerMount(c Instance) (bool, error) {
+func (s *storageLvm) ContainerMount(c instance.Instance) (bool, error) {
 	return s.doContainerMount(c.Project(), c.Name(), false)
 }
 
@@ -1292,7 +1294,7 @@ func (s *storageLvm) doContainerMount(project, name string, snap bool) (bool, er
 	return ourMount, nil
 }
 
-func (s *storageLvm) ContainerUmount(c Instance, path string) (bool, error) {
+func (s *storageLvm) ContainerUmount(c instance.Instance, path string) (bool, error) {
 	return s.umount(c.Project(), c.Name(), path)
 }
 
@@ -1340,7 +1342,7 @@ func (s *storageLvm) umount(project, name string, path string) (bool, error) {
 	return ourUmount, nil
 }
 
-func (s *storageLvm) ContainerRename(container Instance, newContainerName string) error {
+func (s *storageLvm) ContainerRename(container instance.Instance, newContainerName string) error {
 	logger.Debugf("Renaming LVM storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newContainerName)
 
 	tryUndo := true
@@ -1421,7 +1423,7 @@ func (s *storageLvm) ContainerRename(container Instance, newContainerName string
 	return nil
 }
 
-func (s *storageLvm) ContainerRestore(target Instance, source Instance) error {
+func (s *storageLvm) ContainerRestore(target instance.Instance, source instance.Instance) error {
 	logger.Debugf("Restoring LVM storage volume for container \"%s\" from %s to %s", s.volume.Name, source.Name(), target.Name())
 
 	_, sourcePool, _ := source.Storage().GetContainerPoolInfo()
@@ -1500,11 +1502,11 @@ func (s *storageLvm) ContainerRestore(target Instance, source Instance) error {
 	return nil
 }
 
-func (s *storageLvm) ContainerGetUsage(container Instance) (int64, error) {
+func (s *storageLvm) ContainerGetUsage(container instance.Instance) (int64, error) {
 	return -1, fmt.Errorf("the LVM container backend doesn't support quotas")
 }
 
-func (s *storageLvm) ContainerSnapshotCreate(snapshotContainer Instance, sourceContainer Instance) error {
+func (s *storageLvm) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
 	logger.Debugf("Creating LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	err := s.createSnapshotContainer(snapshotContainer, sourceContainer, true)
@@ -1516,7 +1518,7 @@ func (s *storageLvm) ContainerSnapshotCreate(snapshotContainer Instance, sourceC
 	return nil
 }
 
-func (s *storageLvm) ContainerSnapshotDelete(snapshotContainer Instance) error {
+func (s *storageLvm) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
 	logger.Debugf("Deleting LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	err := s.ContainerDelete(snapshotContainer)
@@ -1528,7 +1530,7 @@ func (s *storageLvm) ContainerSnapshotDelete(snapshotContainer Instance) error {
 	return nil
 }
 
-func (s *storageLvm) ContainerSnapshotRename(snapshotContainer Instance, newContainerName string) error {
+func (s *storageLvm) ContainerSnapshotRename(snapshotContainer instance.Instance, newContainerName string) error {
 	logger.Debugf("Renaming LVM storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newContainerName)
 
 	tryUndo := true
@@ -1560,7 +1562,7 @@ func (s *storageLvm) ContainerSnapshotRename(snapshotContainer Instance, newCont
 	return nil
 }
 
-func (s *storageLvm) ContainerSnapshotStart(container Instance) (bool, error) {
+func (s *storageLvm) ContainerSnapshotStart(container instance.Instance) (bool, error) {
 	logger.Debugf(`Initializing LVM storage volume for snapshot "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
 
 	poolName := s.getOnDiskPoolName()
@@ -1610,7 +1612,7 @@ func (s *storageLvm) ContainerSnapshotStart(container Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageLvm) ContainerSnapshotStop(container Instance) (bool, error) {
+func (s *storageLvm) ContainerSnapshotStop(container instance.Instance) (bool, error) {
 	logger.Debugf("Stopping LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	containerName := container.Name()
@@ -1649,7 +1651,7 @@ func (s *storageLvm) ContainerSnapshotStop(container Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageLvm) ContainerSnapshotCreateEmpty(snapshotContainer Instance) error {
+func (s *storageLvm) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
 	logger.Debugf("Creating empty LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
 
 	err := s.ContainerCreate(snapshotContainer)
@@ -1661,7 +1663,7 @@ func (s *storageLvm) ContainerSnapshotCreateEmpty(snapshotContainer Instance) er
 	return nil
 }
 
-func (s *storageLvm) ContainerBackupCreate(backup backup, source Instance) error {
+func (s *storageLvm) ContainerBackupCreate(backup instance.Backup, source instance.Instance) error {
 	poolName := s.getOnDiskPoolName()
 
 	// Start storage
@@ -1693,7 +1695,7 @@ func (s *storageLvm) ContainerBackupCreate(backup backup, source Instance) error
 	bwlimit := s.pool.Config["rsync.bwlimit"]
 
 	// Handle snapshots
-	if !backup.instanceOnly {
+	if !backup.InstanceOnly {
 		snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
 
 		// Retrieve the snapshots
@@ -1771,7 +1773,7 @@ func (s *storageLvm) ContainerBackupCreate(backup backup, source Instance) error
 	return nil
 }
 
-func (s *storageLvm) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageLvm) ContainerBackupLoad(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	containerPath, err := s.doContainerBackupLoad(info.Project, info.Name, info.Privileged, false)
 	if err != nil {
 		return err
@@ -1959,7 +1961,7 @@ func (s *storageLvm) ImageCreate(fingerprint string, tracker *ioprogress.Progres
 		}
 
 		imagePath := shared.VarPath("images", fingerprint)
-		err = unpackImage(imagePath, imageMntPoint, storageTypeLvm, s.s.OS.RunningInUserNS, nil)
+		err = unpackImage(imagePath, imageMntPoint, instance.StorageTypeLvm, s.s.OS.RunningInUserNS, nil)
 		if err != nil {
 			return err
 		}
@@ -2064,11 +2066,11 @@ func (s *storageLvm) PreservesInodes() bool {
 	return false
 }
 
-func (s *storageLvm) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageLvm) MigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncMigrationSource(args)
 }
 
-func (s *storageLvm) MigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageLvm) MigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return rsyncMigrationSink(conn, op, args)
 }
 
@@ -2261,11 +2263,11 @@ func (s *storageLvm) StoragePoolVolumeCopy(source *api.StorageVolumeSource) erro
 	return nil
 }
 
-func (s *storageLvm) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageLvm) StorageMigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncStorageMigrationSource(args)
 }
 
-func (s *storageLvm) StorageMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageLvm) StorageMigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return rsyncStorageMigrationSink(conn, op, args)
 }
 
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index 65824f20d4..4949b8e477 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -12,6 +12,7 @@ import (
 
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/lxd/state"
 	driver "github.com/lxc/lxd/lxd/storage"
@@ -256,7 +257,7 @@ func (s *storageLvm) createSnapshotLV(project, vgName string, origLvName string,
 	return targetLvmVolumePath, nil
 }
 
-func (s *storageLvm) createSnapshotContainer(snapshotContainer Instance, sourceContainer Instance, readonly bool) error {
+func (s *storageLvm) createSnapshotContainer(snapshotContainer instance.Instance, sourceContainer instance.Instance, readonly bool) error {
 	tryUndo := true
 
 	sourceContainerName := sourceContainer.Name()
@@ -303,7 +304,7 @@ func (s *storageLvm) createSnapshotContainer(snapshotContainer Instance, sourceC
 }
 
 // Copy a container on a storage pool that does use a thinpool.
-func (s *storageLvm) copyContainerThinpool(target Instance, source Instance, readonly bool) error {
+func (s *storageLvm) copyContainerThinpool(target instance.Instance, source instance.Instance, readonly bool) error {
 	err := s.createSnapshotContainer(target, source, readonly)
 	if err != nil {
 		logger.Errorf("Error creating snapshot LV for copy: %s", err)
@@ -341,7 +342,7 @@ func (s *storageLvm) copyContainerThinpool(target Instance, source Instance, rea
 	return nil
 }
 
-func (s *storageLvm) copySnapshot(target Instance, source Instance, refresh bool) error {
+func (s *storageLvm) copySnapshot(target instance.Instance, source instance.Instance, refresh bool) error {
 	sourcePool, err := source.StoragePool()
 	if err != nil {
 		return err
@@ -370,7 +371,7 @@ func (s *storageLvm) copySnapshot(target Instance, source Instance, refresh bool
 }
 
 // Copy a container on a storage pool that does not use a thinpool.
-func (s *storageLvm) copyContainerLv(target Instance, source Instance, readonly bool, refresh bool) error {
+func (s *storageLvm) copyContainerLv(target instance.Instance, source instance.Instance, readonly bool, refresh bool) error {
 	exists, err := storageLVExists(getLvmDevPath(target.Project(), s.getOnDiskPoolName(),
 		storagePoolVolumeAPIEndpointContainers, containerNameToLVName(target.Name())))
 	if err != nil {
@@ -445,7 +446,7 @@ func (s *storageLvm) copyContainerLv(target Instance, source Instance, readonly
 }
 
 // Copy an lvm container.
-func (s *storageLvm) copyContainer(target Instance, source Instance, refresh bool) error {
+func (s *storageLvm) copyContainer(target instance.Instance, source instance.Instance, refresh bool) error {
 	targetPool, err := target.StoragePool()
 	if err != nil {
 		return err
@@ -483,7 +484,7 @@ func (s *storageLvm) copyContainer(target Instance, source Instance, refresh boo
 	return nil
 }
 
-func (s *storageLvm) containerCreateFromImageLv(c Instance, fp string) error {
+func (s *storageLvm) containerCreateFromImageLv(c instance.Instance, fp string) error {
 	containerName := c.Name()
 
 	err := s.ContainerCreate(c)
@@ -503,7 +504,7 @@ func (s *storageLvm) containerCreateFromImageLv(c Instance, fp string) error {
 
 	imagePath := shared.VarPath("images", fp)
 	containerMntPoint := driver.GetContainerMountPoint(c.Project(), s.pool.Name, containerName)
-	err = unpackImage(imagePath, containerMntPoint, storageTypeLvm, s.s.OS.RunningInUserNS, nil)
+	err = unpackImage(imagePath, containerMntPoint, instance.StorageTypeLvm, s.s.OS.RunningInUserNS, nil)
 	if err != nil {
 		logger.Errorf(`Failed to unpack image "%s" into non-thinpool LVM storage volume "%s" for container "%s" on storage pool "%s": %s`, imagePath, containerMntPoint, containerName, s.pool.Name, err)
 		return err
@@ -515,7 +516,7 @@ func (s *storageLvm) containerCreateFromImageLv(c Instance, fp string) error {
 	return nil
 }
 
-func (s *storageLvm) containerCreateFromImageThinLv(c Instance, fp string) error {
+func (s *storageLvm) containerCreateFromImageThinLv(c instance.Instance, fp string) error {
 	poolName := s.getOnDiskPoolName()
 	// Check if the image already exists.
 	imageLvmDevPath := getLvmDevPath("default", poolName, storagePoolVolumeAPIEndpointImages, fp)
@@ -721,7 +722,7 @@ func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) {
 func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
 	results := []string{}
 
-	cNames, err := s.Cluster.ContainersNodeList(instance.TypeContainer)
+	cNames, err := s.Cluster.ContainersNodeList(instancetype.Container)
 	if err != nil {
 		return results, err
 	}
diff --git a/lxd/storage_migration.go b/lxd/storage_migration.go
index 3b1d42ec22..993f252fb6 100644
--- a/lxd/storage_migration.go
+++ b/lxd/storage_migration.go
@@ -9,7 +9,9 @@ import (
 	"github.com/lxc/lxd/lxd/db"
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/project"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
@@ -18,12 +20,12 @@ import (
 )
 
 type rsyncStorageSourceDriver struct {
-	container     Instance
-	snapshots     []Instance
+	container     instance.Instance
+	snapshots     []instance.Instance
 	rsyncFeatures []string
 }
 
-func (s rsyncStorageSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error {
+func (s rsyncStorageSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation.Operation, bwlimit string, storage instance.Storage, volumeOnly bool) error {
 	ourMount, err := storage.StoragePoolVolumeMount()
 	if err != nil {
 		return err
@@ -67,7 +69,7 @@ func (s rsyncStorageSourceDriver) SendStorageVolume(conn *websocket.Conn, op *op
 	return nil
 }
 
-func (s rsyncStorageSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation, bwlimit string, containerOnly bool) error {
+func (s rsyncStorageSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation.Operation, bwlimit string, containerOnly bool) error {
 	ctName, _, _ := shared.ContainerGetParentAndSnapshotName(s.container.Name())
 
 	if !containerOnly {
@@ -117,12 +119,12 @@ func (s rsyncStorageSourceDriver) Cleanup() {
 	// noop
 }
 
-func rsyncStorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func rsyncStorageMigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return rsyncStorageSourceDriver{nil, nil, args.RsyncFeatures}, nil
 }
 
-func rsyncRefreshSource(refreshSnapshots []string, args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	var snapshots = []Instance{}
+func rsyncRefreshSource(refreshSnapshots []string, args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
+	var snapshots = []instance.Instance{}
 	if !args.InstanceOnly {
 		allSnapshots, err := args.Instance.Snapshots()
 		if err != nil {
@@ -142,9 +144,9 @@ func rsyncRefreshSource(refreshSnapshots []string, args MigrationSourceArgs) (Mi
 	return rsyncStorageSourceDriver{args.Instance, snapshots, args.RsyncFeatures}, nil
 }
 
-func rsyncMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func rsyncMigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	var err error
-	var snapshots = []Instance{}
+	var snapshots = []instance.Instance{}
 	if !args.InstanceOnly {
 		snapshots, err = args.Instance.Snapshots()
 		if err != nil {
@@ -176,7 +178,7 @@ func snapshotProtobufToContainerArgs(project string, containerName string, snap
 	args := db.ContainerArgs{
 		Architecture: int(snap.GetArchitecture()),
 		Config:       config,
-		Type:         instance.TypeContainer,
+		Type:         instancetype.Container,
 		Snapshot:     true,
 		Devices:      devices,
 		Ephemeral:    snap.GetEphemeral(),
@@ -197,7 +199,7 @@ func snapshotProtobufToContainerArgs(project string, containerName string, snap
 	return args
 }
 
-func rsyncStorageMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func rsyncStorageMigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	err := args.Storage.StoragePoolVolumeCreate()
 	if err != nil {
 		return err
@@ -258,7 +260,7 @@ func rsyncStorageMigrationSink(conn *websocket.Conn, op *operation, args Migrati
 	return RsyncRecv(path, conn, wrapper, args.RsyncFeatures)
 }
 
-func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func rsyncMigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	ourStart, err := args.Instance.StorageStart()
 	if err != nil {
 		return err
@@ -286,7 +288,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA
 		return err
 	}
 
-	isDirBackend := args.Instance.Storage().GetStorageType() == storageTypeDir
+	isDirBackend := args.Instance.Storage().GetStorageType() == instance.StorageTypeDir
 	if isDirBackend {
 		if !args.InstanceOnly {
 			for _, snap := range args.Snapshots {
@@ -321,7 +323,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA
 				}
 
 				// Try to load the instance
-				s, err := instanceLoadByProjectAndName(args.Instance.DaemonState(),
+				s, err := instance.InstanceLoadByProjectAndName(args.Instance.DaemonState(),
 					args.Instance.Project(), snapArgs.Name)
 				if err != nil {
 					// Create the snapshot since it doesn't seem to exist
@@ -336,7 +338,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA
 					return err
 				}
 
-				if args.Instance.Type() == instance.TypeContainer {
+				if args.Instance.Type() == instancetype.Container {
 					c := args.Instance.(container)
 					err = resetContainerDiskIdmap(c, args.Idmap)
 					if err != nil {
@@ -390,7 +392,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA
 					return err
 				}
 
-				if args.Instance.Type() == instance.TypeContainer {
+				if args.Instance.Type() == instancetype.Container {
 					c := args.Instance.(container)
 					err = resetContainerDiskIdmap(c, args.Idmap)
 					if err != nil {
@@ -398,7 +400,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA
 					}
 				}
 
-				_, err = instanceLoadByProjectAndName(args.Instance.DaemonState(),
+				_, err = instance.InstanceLoadByProjectAndName(args.Instance.DaemonState(),
 					args.Instance.Project(), snapArgs.Name)
 				if err != nil {
 					_, err = containerCreateAsSnapshot(args.Instance.DaemonState(), snapArgs, args.Instance)
@@ -425,7 +427,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkA
 		}
 	}
 
-	if args.Instance.Type() == instance.TypeContainer {
+	if args.Instance.Type() == instancetype.Container {
 		c := args.Instance.(container)
 		err = resetContainerDiskIdmap(c, args.Idmap)
 		if err != nil {
diff --git a/lxd/storage_migration_btrfs.go b/lxd/storage_migration_btrfs.go
index 166fd8599c..0d37354f35 100644
--- a/lxd/storage_migration_btrfs.go
+++ b/lxd/storage_migration_btrfs.go
@@ -9,14 +9,16 @@ import (
 
 	"github.com/gorilla/websocket"
 
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
 )
 
 type btrfsMigrationSourceDriver struct {
-	container          Instance
-	snapshots          []Instance
+	container          instance.Instance
+	snapshots          []instance.Instance
 	btrfsSnapshotNames []string
 	btrfs              *storageBtrfs
 	runningSnapName    string
@@ -67,7 +69,7 @@ func (s *btrfsMigrationSourceDriver) send(conn *websocket.Conn, btrfsPath string
 	return err
 }
 
-func (s *btrfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation, bwlimit string, containerOnly bool) error {
+func (s *btrfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation.Operation, bwlimit string, containerOnly bool) error {
 	_, containerPool, _ := s.container.Storage().GetContainerPoolInfo()
 	containerName := s.container.Name()
 	containersPath := driver.GetContainerMountPoint("default", containerPool, "")
@@ -178,7 +180,7 @@ func (s *btrfsMigrationSourceDriver) Cleanup() {
 	}
 }
 
-func (s *btrfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error {
+func (s *btrfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation.Operation, bwlimit string, storage instance.Storage, volumeOnly bool) error {
 	msg := fmt.Sprintf("Function not implemented")
 	logger.Errorf(msg)
 	return fmt.Errorf(msg)
diff --git a/lxd/storage_migration_ceph.go b/lxd/storage_migration_ceph.go
index 90fa2b80a5..2e29ba060b 100644
--- a/lxd/storage_migration_ceph.go
+++ b/lxd/storage_migration_ceph.go
@@ -9,21 +9,23 @@ import (
 	"github.com/gorilla/websocket"
 	"github.com/pborman/uuid"
 
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
 )
 
 type rbdMigrationSourceDriver struct {
-	container        Instance
-	snapshots        []Instance
+	container        instance.Instance
+	snapshots        []instance.Instance
 	rbdSnapshotNames []string
 	ceph             *storageCeph
 	runningSnapName  string
 	stoppedSnapName  string
 }
 
-func (s *rbdMigrationSourceDriver) Snapshots() []Instance {
+func (s *rbdMigrationSourceDriver) Snapshots() []instance.Instance {
 	return s.snapshots
 }
 
@@ -72,8 +74,7 @@ func (s *rbdMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn, bwl
 	return nil
 }
 
-func (s *rbdMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn,
-	op *operation, bwlimit string, containerOnly bool) error {
+func (s *rbdMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation.Operation, bwlimit string, containerOnly bool) error {
 	containerName := s.container.Name()
 	if s.container.IsSnapshot() {
 		// ContainerSnapshotStart() will create the clone that is
@@ -149,7 +150,7 @@ func (s *rbdMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn,
 	return nil
 }
 
-func (s *rbdMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error {
+func (s *rbdMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation.Operation, bwlimit string, storage instance.Storage, volumeOnly bool) error {
 	msg := fmt.Sprintf("Function not implemented")
 	logger.Errorf(msg)
 	return fmt.Errorf(msg)
diff --git a/lxd/storage_migration_zfs.go b/lxd/storage_migration_zfs.go
index fe94bf6eab..0c8dc6450e 100644
--- a/lxd/storage_migration_zfs.go
+++ b/lxd/storage_migration_zfs.go
@@ -9,14 +9,16 @@ import (
 	"github.com/gorilla/websocket"
 	"github.com/pborman/uuid"
 
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
 )
 
 type zfsMigrationSourceDriver struct {
-	instance         Instance
-	snapshots        []Instance
+	instance         instance.Instance
+	snapshots        []instance.Instance
 	zfsSnapshotNames []string
 	zfs              *storageZfs
 	runningSnapName  string
@@ -78,7 +80,7 @@ func (s *zfsMigrationSourceDriver) send(conn *websocket.Conn, zfsName string, zf
 	return err
 }
 
-func (s *zfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation, bwlimit string, containerOnly bool) error {
+func (s *zfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation.Operation, bwlimit string, containerOnly bool) error {
 	if s.instance.IsSnapshot() {
 		_, snapOnlyName, _ := shared.ContainerGetParentAndSnapshotName(s.instance.Name())
 		snapshotName := fmt.Sprintf("snapshot-%s", snapOnlyName)
@@ -139,7 +141,7 @@ func (s *zfsMigrationSourceDriver) Cleanup() {
 	}
 }
 
-func (s *zfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error {
+func (s *zfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation.Operation, bwlimit string, storage instance.Storage, volumeOnly bool) error {
 	msg := fmt.Sprintf("Function not implemented")
 	logger.Errorf(msg)
 	return fmt.Errorf(msg)
diff --git a/lxd/storage_mock.go b/lxd/storage_mock.go
index 3b9520928e..78743585dc 100644
--- a/lxd/storage_mock.go
+++ b/lxd/storage_mock.go
@@ -5,7 +5,9 @@ import (
 
 	"github.com/gorilla/websocket"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/ioprogress"
@@ -17,8 +19,8 @@ type storageMock struct {
 }
 
 func (s *storageMock) StorageCoreInit() error {
-	s.sType = storageTypeMock
-	typeName, err := storageTypeToString(s.sType)
+	s.sType = instance.StorageTypeMock
+	typeName, err := instance.StorageTypeToString(s.sType)
 	if err != nil {
 		return err
 	}
@@ -109,77 +111,77 @@ func (s *storageMock) StoragePoolUpdate(writable *api.StoragePoolPut, changedCon
 	return nil
 }
 
-func (s *storageMock) ContainerStorageReady(container Instance) bool {
+func (s *storageMock) ContainerStorageReady(container instance.Instance) bool {
 	return true
 }
 
-func (s *storageMock) ContainerCreate(container Instance) error {
+func (s *storageMock) ContainerCreate(container instance.Instance) error {
 	return nil
 }
 
-func (s *storageMock) ContainerCreateFromImage(container Instance, imageFingerprint string, tracker *ioprogress.ProgressTracker) error {
+func (s *storageMock) ContainerCreateFromImage(container instance.Instance, imageFingerprint string, tracker *ioprogress.ProgressTracker) error {
 	return nil
 }
 
-func (s *storageMock) ContainerDelete(container Instance) error {
+func (s *storageMock) ContainerDelete(container instance.Instance) error {
 	return nil
 }
 
-func (s *storageMock) ContainerCopy(target Instance, source Instance, containerOnly bool) error {
+func (s *storageMock) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
 	return nil
 }
 
-func (s *storageMock) ContainerRefresh(target Instance, source Instance, snapshots []Instance) error {
+func (s *storageMock) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
 	return nil
 }
 
-func (s *storageMock) ContainerMount(c Instance) (bool, error) {
+func (s *storageMock) ContainerMount(c instance.Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageMock) ContainerUmount(c Instance, path string) (bool, error) {
+func (s *storageMock) ContainerUmount(c instance.Instance, path string) (bool, error) {
 	return true, nil
 }
 
-func (s *storageMock) ContainerRename(container Instance, newName string) error {
+func (s *storageMock) ContainerRename(container instance.Instance, newName string) error {
 	return nil
 }
 
-func (s *storageMock) ContainerRestore(container Instance, sourceContainer Instance) error {
+func (s *storageMock) ContainerRestore(container instance.Instance, sourceContainer instance.Instance) error {
 	return nil
 }
 
-func (s *storageMock) ContainerGetUsage(container Instance) (int64, error) {
+func (s *storageMock) ContainerGetUsage(container instance.Instance) (int64, error) {
 	return 0, nil
 }
-func (s *storageMock) ContainerSnapshotCreate(snapshotContainer Instance, sourceContainer Instance) error {
+func (s *storageMock) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
 	return nil
 }
-func (s *storageMock) ContainerSnapshotDelete(snapshotContainer Instance) error {
+func (s *storageMock) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
 	return nil
 }
 
-func (s *storageMock) ContainerSnapshotRename(snapshotContainer Instance, newName string) error {
+func (s *storageMock) ContainerSnapshotRename(snapshotContainer instance.Instance, newName string) error {
 	return nil
 }
 
-func (s *storageMock) ContainerSnapshotStart(container Instance) (bool, error) {
+func (s *storageMock) ContainerSnapshotStart(container instance.Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageMock) ContainerSnapshotStop(container Instance) (bool, error) {
+func (s *storageMock) ContainerSnapshotStop(container instance.Instance) (bool, error) {
 	return true, nil
 }
 
-func (s *storageMock) ContainerSnapshotCreateEmpty(snapshotContainer Instance) error {
+func (s *storageMock) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
 	return nil
 }
 
-func (s *storageMock) ContainerBackupCreate(backup backup, sourceContainer Instance) error {
+func (s *storageMock) ContainerBackupCreate(backup instance.Backup, sourceContainer instance.Instance) error {
 	return nil
 }
 
-func (s *storageMock) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
+func (s *storageMock) ContainerBackupLoad(info instance.BackupInfo, data io.ReadSeeker, tarArgs []string) error {
 	return nil
 }
 
@@ -207,11 +209,11 @@ func (s *storageMock) PreservesInodes() bool {
 	return false
 }
 
-func (s *storageMock) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageMock) MigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return nil, nil
 }
 
-func (s *storageMock) MigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageMock) MigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return nil
 }
 
@@ -227,11 +229,11 @@ func (s *storageMock) StoragePoolVolumeCopy(source *api.StorageVolumeSource) err
 	return nil
 }
 
-func (s *storageMock) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
+func (s *storageMock) StorageMigrationSource(args instance.MigrationSourceArgs) (instance.MigrationStorageSourceDriver, error) {
 	return nil, nil
 }
 
-func (s *storageMock) StorageMigrationSink(conn *websocket.Conn, op *operation, args MigrationSinkArgs) error {
+func (s *storageMock) StorageMigrationSink(conn *websocket.Conn, op *operation.Operation, args instance.MigrationSinkArgs) error {
 	return nil
 }
 
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index a9c5e60b10..305ae0928a 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -12,6 +12,7 @@ import (
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/lxd/util"
@@ -41,7 +42,7 @@ var storagePoolCmd = APIEndpoint{
 
 // /1.0/storage-pools
 // List all storage pools.
-func storagePoolsGet(d *Daemon, r *http.Request) Response {
+func storagePoolsGet(d *Daemon, r *http.Request) daemon.Response {
 	recursion := util.IsRecursionRequest(r)
 
 	pools, err := d.cluster.StoragePools()
@@ -80,7 +81,7 @@ func storagePoolsGet(d *Daemon, r *http.Request) Response {
 
 // /1.0/storage-pools
 // Create a storage pool.
-func storagePoolsPost(d *Daemon, r *http.Request) Response {
+func storagePoolsPost(d *Daemon, r *http.Request) daemon.Response {
 	storagePoolCreateLock.Lock()
 	defer storagePoolCreateLock.Unlock()
 
@@ -269,7 +270,7 @@ func storagePoolsPostCluster(d *Daemon, req api.StoragePoolsPost) error {
 
 // /1.0/storage-pools/{name}
 // Get a single storage pool.
-func storagePoolGet(d *Daemon, r *http.Request) Response {
+func storagePoolGet(d *Daemon, r *http.Request) daemon.Response {
 	// If a target was specified, forward the request to the relevant node.
 	response := ForwardedResponseIfTargetIsRemote(d, r)
 	if response != nil {
@@ -313,7 +314,7 @@ func storagePoolGet(d *Daemon, r *http.Request) Response {
 
 // /1.0/storage-pools/{name}
 // Replace pool properties.
-func storagePoolPut(d *Daemon, r *http.Request) Response {
+func storagePoolPut(d *Daemon, r *http.Request) daemon.Response {
 	poolName := mux.Vars(r)["name"]
 
 	// Get the existing storage pool.
@@ -388,7 +389,7 @@ func storagePoolPut(d *Daemon, r *http.Request) Response {
 
 // /1.0/storage-pools/{name}
 // Change pool properties.
-func storagePoolPatch(d *Daemon, r *http.Request) Response {
+func storagePoolPatch(d *Daemon, r *http.Request) daemon.Response {
 	poolName := mux.Vars(r)["name"]
 
 	// Get the existing network
@@ -510,7 +511,7 @@ func storagePoolClusterFillWithNodeConfig(dbConfig, reqConfig map[string]string)
 
 // /1.0/storage-pools/{name}
 // Delete storage pool.
-func storagePoolDelete(d *Daemon, r *http.Request) Response {
+func storagePoolDelete(d *Daemon, r *http.Request) daemon.Response {
 	poolName := mux.Vars(r)["name"]
 
 	poolID, err := d.cluster.StoragePoolGetID(poolName)
@@ -619,7 +620,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func storagePoolDeleteCheckPreconditions(cluster *db.Cluster, poolName string, poolID int64) Response {
+func storagePoolDeleteCheckPreconditions(cluster *db.Cluster, poolName string, poolID int64) daemon.Response {
 	volumeNames, err := cluster.StoragePoolVolumesGetNames(poolID)
 	if err != nil {
 		return InternalError(err)
diff --git a/lxd/storage_pools_config.go b/lxd/storage_pools_config.go
index b44f0659df..a2221101e3 100644
--- a/lxd/storage_pools_config.go
+++ b/lxd/storage_pools_config.go
@@ -7,6 +7,7 @@ import (
 
 	"golang.org/x/sys/unix"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/units"
 )
@@ -128,7 +129,7 @@ var storagePoolConfigKeys = map[string]func(value string) error{
 
 func storagePoolValidateConfig(name string, driver string, config map[string]string, oldConfig map[string]string) error {
 	err := func(value string) error {
-		return shared.IsOneOf(value, supportedStoragePoolDrivers)
+		return shared.IsOneOf(value, instance.SupportedStoragePoolDrivers)
 	}(driver)
 	if err != nil {
 		return err
diff --git a/lxd/storage_shared.go b/lxd/storage_shared.go
index 11875f06ec..f141b0c596 100644
--- a/lxd/storage_shared.go
+++ b/lxd/storage_shared.go
@@ -1,12 +1,13 @@
 package main
 
 import (
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared/api"
 )
 
 type storageShared struct {
-	sType        storageType
+	sType        instance.StorageType
 	sTypeName    string
 	sTypeVersion string
 
@@ -18,7 +19,7 @@ type storageShared struct {
 	volume *api.StorageVolume
 }
 
-func (s *storageShared) GetStorageType() storageType {
+func (s *storageShared) GetStorageType() instance.StorageType {
 	return s.sType
 }
 
diff --git a/lxd/storage_utils.go b/lxd/storage_utils.go
index fd4ecf11bd..525045ad22 100644
--- a/lxd/storage_utils.go
+++ b/lxd/storage_utils.go
@@ -3,11 +3,12 @@ package main
 import (
 	"fmt"
 
+	"github.com/lxc/lxd/lxd/instance"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared/logger"
 )
 
-func shrinkVolumeFilesystem(s storage, volumeType int, fsType string, devPath string, mntpoint string, byteSize int64, data interface{}) (func() (bool, error), error) {
+func shrinkVolumeFilesystem(s instance.Storage, volumeType int, fsType string, devPath string, mntpoint string, byteSize int64, data interface{}) (func() (bool, error), error) {
 	var cleanupFunc func() (bool, error)
 	switch fsType {
 	case "xfs":

From 84511ca05490c6be23b7d73b89dccd2fd2b0ad08 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:41:13 +0100
Subject: [PATCH 31/72] lxd/seccomp: Removes functions that are moved to
 instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/seccomp.go | 310 -------------------------------------------------
 1 file changed, 310 deletions(-)

diff --git a/lxd/seccomp.go b/lxd/seccomp.go
index 05221cab9e..eabd63ee60 100644
--- a/lxd/seccomp.go
+++ b/lxd/seccomp.go
@@ -276,241 +276,6 @@ const LxdSeccompNotifyMknod = C.LXD_SECCOMP_NOTIFY_MKNOD
 const LxdSeccompNotifyMknodat = C.LXD_SECCOMP_NOTIFY_MKNODAT
 const LxdSeccompNotifySetxattr = C.LXD_SECCOMP_NOTIFY_SETXATTR
 
-const SECCOMP_HEADER = `2
-`
-
-const DEFAULT_SECCOMP_POLICY = `reject_force_umount  # comment this to allow umount -f;  not recommended
-[all]
-kexec_load errno 38
-open_by_handle_at errno 38
-init_module errno 38
-finit_module errno 38
-delete_module errno 38
-`
-
-const SECCOMP_NOTIFY_MKNOD = `mknod notify [1,8192,SCMP_CMP_MASKED_EQ,61440]
-mknod notify [1,24576,SCMP_CMP_MASKED_EQ,61440]
-mknodat notify [2,8192,SCMP_CMP_MASKED_EQ,61440]
-mknodat notify [2,24576,SCMP_CMP_MASKED_EQ,61440]
-`
-const SECCOMP_NOTIFY_SETXATTR = `setxattr notify [3,1,SCMP_CMP_EQ]
-`
-
-const COMPAT_BLOCKING_POLICY = `[%s]
-compat_sys_rt_sigaction errno 38
-stub_x32_rt_sigreturn errno 38
-compat_sys_ioctl errno 38
-compat_sys_readv errno 38
-compat_sys_writev errno 38
-compat_sys_recvfrom errno 38
-compat_sys_sendmsg errno 38
-compat_sys_recvmsg errno 38
-stub_x32_execve errno 38
-compat_sys_ptrace errno 38
-compat_sys_rt_sigpending errno 38
-compat_sys_rt_sigtimedwait errno 38
-compat_sys_rt_sigqueueinfo errno 38
-compat_sys_sigaltstack errno 38
-compat_sys_timer_create errno 38
-compat_sys_mq_notify errno 38
-compat_sys_kexec_load errno 38
-compat_sys_waitid errno 38
-compat_sys_set_robust_list errno 38
-compat_sys_get_robust_list errno 38
-compat_sys_vmsplice errno 38
-compat_sys_move_pages errno 38
-compat_sys_preadv64 errno 38
-compat_sys_pwritev64 errno 38
-compat_sys_rt_tgsigqueueinfo errno 38
-compat_sys_recvmmsg errno 38
-compat_sys_sendmmsg errno 38
-compat_sys_process_vm_readv errno 38
-compat_sys_process_vm_writev errno 38
-compat_sys_setsockopt errno 38
-compat_sys_getsockopt errno 38
-compat_sys_io_setup errno 38
-compat_sys_io_submit errno 38
-stub_x32_execveat errno 38
-`
-
-var seccompPath = shared.VarPath("security", "seccomp")
-
-func SeccompProfilePath(c container) string {
-	return path.Join(seccompPath, c.Name())
-}
-
-func seccompContainerNeedsPolicy(c container) bool {
-	config := c.ExpandedConfig()
-
-	// Check for text keys
-	keys := []string{
-		"raw.seccomp",
-		"security.syscalls.whitelist",
-		"security.syscalls.blacklist",
-	}
-
-	for _, k := range keys {
-		_, hasKey := config[k]
-		if hasKey {
-			return true
-		}
-	}
-
-	// Check for boolean keys that default to false
-	keys = []string{
-		"security.syscalls.blacklist_compat",
-		"security.syscalls.intercept.mknod",
-		"security.syscalls.intercept.setxattr",
-	}
-
-	for _, k := range keys {
-		if shared.IsTrue(config[k]) {
-			return true
-		}
-	}
-
-	// Check for boolean keys that default to true
-	keys = []string{
-		"security.syscalls.blacklist_default",
-	}
-
-	for _, k := range keys {
-		value, ok := config[k]
-		if !ok || shared.IsTrue(value) {
-			return true
-		}
-	}
-
-	return false
-}
-
-func seccompContainerNeedsIntercept(c container) (bool, error) {
-	// No need if privileged
-	if c.IsPrivileged() {
-		return false, nil
-	}
-
-	// If nested, assume the host handles it
-	if c.DaemonState().OS.RunningInUserNS {
-		return false, nil
-	}
-
-	config := c.ExpandedConfig()
-
-	keys := []string{
-		"security.syscalls.intercept.mknod",
-		"security.syscalls.intercept.setxattr",
-	}
-
-	needed := false
-	for _, k := range keys {
-		if shared.IsTrue(config[k]) {
-			needed = true
-			break
-		}
-	}
-
-	if needed {
-		if !lxcSupportSeccompNotify(c.DaemonState()) {
-			return needed, fmt.Errorf("System doesn't support syscall interception")
-		}
-	}
-
-	return needed, nil
-}
-
-func seccompGetPolicyContent(c container) (string, error) {
-	config := c.ExpandedConfig()
-
-	// Full policy override
-	raw := config["raw.seccomp"]
-	if raw != "" {
-		return raw, nil
-	}
-
-	// Policy header
-	policy := SECCOMP_HEADER
-	whitelist := config["security.syscalls.whitelist"]
-	if whitelist != "" {
-		policy += "whitelist\n[all]\n"
-		policy += whitelist
-	} else {
-		policy += "blacklist\n"
-
-		default_, ok := config["security.syscalls.blacklist_default"]
-		if !ok || shared.IsTrue(default_) {
-			policy += DEFAULT_SECCOMP_POLICY
-		}
-	}
-
-	// Syscall interception
-	ok, err := seccompContainerNeedsIntercept(c)
-	if err != nil {
-		return "", err
-	}
-
-	if ok {
-		if shared.IsTrue(config["security.syscalls.intercept.mknod"]) {
-			policy += SECCOMP_NOTIFY_MKNOD
-		}
-
-		if shared.IsTrue(config["security.syscalls.intercept.setxattr"]) {
-			policy += SECCOMP_NOTIFY_SETXATTR
-		}
-	}
-
-	if whitelist != "" {
-		return policy, nil
-	}
-
-	// Additional blacklist entries
-	compat := config["security.syscalls.blacklist_compat"]
-	if shared.IsTrue(compat) {
-		arch, err := osarch.ArchitectureName(c.Architecture())
-		if err != nil {
-			return "", err
-		}
-		policy += fmt.Sprintf(COMPAT_BLOCKING_POLICY, arch)
-	}
-
-	blacklist := config["security.syscalls.blacklist"]
-	if blacklist != "" {
-		policy += blacklist
-	}
-
-	return policy, nil
-}
-
-func SeccompCreateProfile(c container) error {
-	/* Unlike apparmor, there is no way to "cache" profiles, and profiles
-	 * are automatically unloaded when a task dies. Thus, we don't need to
-	 * unload them when a container stops, and we don't have to worry about
-	 * the mtime on the file for any compiler purpose, so let's just write
-	 * out the profile.
-	 */
-	if !seccompContainerNeedsPolicy(c) {
-		return nil
-	}
-
-	profile, err := seccompGetPolicyContent(c)
-	if err != nil {
-		return err
-	}
-
-	if err := os.MkdirAll(seccompPath, 0700); err != nil {
-		return err
-	}
-
-	return ioutil.WriteFile(SeccompProfilePath(c), []byte(profile), 0600)
-}
-
-func SeccompDeleteProfile(c container) {
-	/* similar to AppArmor, if we've never started this container, the
-	 * delete can fail and that's ok.
-	 */
-	os.Remove(SeccompProfilePath(c))
-}
-
 type SeccompServer struct {
 	d    *Daemon
 	path string
@@ -726,81 +491,6 @@ func NewSeccompServer(d *Daemon, path string) (*SeccompServer, error) {
 	return &s, nil
 }
 
-func taskIds(pid int) (error, int64, int64, int64, int64) {
-	status, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
-	if err != nil {
-		return err, -1, -1, -1, -1
-	}
-
-	reUid := regexp.MustCompile("Uid:\\s*([0-9]*)\\s*([0-9]*)\\s*([0-9]*)\\s*([0-9]*)")
-	reGid := regexp.MustCompile("Gid:\\s*([0-9]*)\\s*([0-9]*)\\s*([0-9]*)\\s*([0-9]*)")
-	var gid int64 = -1
-	var uid int64 = -1
-	var fsgid int64 = -1
-	var fsuid int64 = -1
-	uidFound := false
-	gidFound := false
-	for _, line := range strings.Split(string(status), "\n") {
-		if uidFound && gidFound {
-			break
-		}
-
-		if !uidFound {
-			m := reUid.FindStringSubmatch(line)
-			if m != nil && len(m) > 2 {
-				// effective uid
-				result, err := strconv.ParseInt(m[2], 10, 64)
-				if err != nil {
-					return err, -1, -1, -1, -1
-				}
-
-				uid = result
-				uidFound = true
-			}
-
-			if m != nil && len(m) > 4 {
-				// fsuid
-				result, err := strconv.ParseInt(m[4], 10, 64)
-				if err != nil {
-					return err, -1, -1, -1, -1
-				}
-
-				fsuid = result
-			}
-
-			continue
-		}
-
-		if !gidFound {
-			m := reGid.FindStringSubmatch(line)
-			if m != nil && len(m) > 2 {
-				// effective gid
-				result, err := strconv.ParseInt(m[2], 10, 64)
-				if err != nil {
-					return err, -1, -1, -1, -1
-				}
-
-				gid = result
-				gidFound = true
-			}
-
-			if m != nil && len(m) > 4 {
-				// fsgid
-				result, err := strconv.ParseInt(m[4], 10, 64)
-				if err != nil {
-					return err, -1, -1, -1, -1
-				}
-
-				fsgid = result
-			}
-
-			continue
-		}
-	}
-
-	return nil, uid, gid, fsuid, fsgid
-}
-
 func CallForkmknod(c container, dev config.Device, requestPID int) int {
 	rootLink := fmt.Sprintf("/proc/%d/root", requestPID)
 	rootPath, err := os.Readlink(rootLink)
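
For reference, the removed seccompGetPolicyContent assembled the profile as plain text: the header, then either a whitelist or a blacklist section, then optional notify and compat entries. As a worked example derived from the constants above, a container with no whitelist and security.syscalls.blacklist_default unset (so it defaults to true) gets a profile beginning:

2
blacklist
reject_force_umount  # comment this to allow umount -f;  not recommended
[all]
kexec_load errno 38
open_by_handle_at errno 38
init_module errno 38
finit_module errno 38
delete_module errno 38

with the notify and blacklist extras appended only when the corresponding config keys are set.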

From eca7355242ceb3a4462855b6a65f4042d1ee6c64 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:41:32 +0100
Subject: [PATCH 32/72] lxd/seccomp: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/seccomp.go | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/lxd/seccomp.go b/lxd/seccomp.go
index eabd63ee60..86e719871c 100644
--- a/lxd/seccomp.go
+++ b/lxd/seccomp.go
@@ -6,12 +6,10 @@ package main
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"os"
 	"path"
 	"path/filepath"
-	"regexp"
 	"strconv"
 	"strings"
 	"unsafe"
@@ -19,12 +17,12 @@ import (
 	"golang.org/x/sys/unix"
 
 	"github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	log "github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/netutils"
-	"github.com/lxc/lxd/shared/osarch"
 )
 
 /*
@@ -498,7 +496,7 @@ func CallForkmknod(c container, dev config.Device, requestPID int) int {
 		return int(-C.EPERM)
 	}
 
-	err, uid, gid, fsuid, fsgid := taskIds(requestPID)
+	err, uid, gid, fsuid, fsgid := instance.TaskIDs(requestPID)
 	if err != nil {
 		return int(-C.EPERM)
 	}
@@ -671,7 +669,7 @@ func (s *SeccompServer) HandleSetxattrSyscall(c container, siov *SeccompIovec) i
 	args := SetxattrArgs{}
 
 	args.pid = int(siov.req.pid)
-	err, uid, gid, fsuid, fsgid := taskIds(args.pid)
+	err, uid, gid, fsuid, fsgid := instance.TaskIDs(args.pid)
 	if err != nil {
 		return int(-C.EPERM)
 	}
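
Both call sites keep the error-first return order of the removed taskIds helper. A hedged usage sketch, assuming instance.TaskIDs preserves that signature after the move:

package main

import (
	"fmt"
	"os"

	"github.com/lxc/lxd/lxd/instance"
)

func main() {
	// TaskIDs parses /proc/<pid>/status and returns, in order:
	// err, effective uid, effective gid, fsuid, fsgid.
	err, uid, gid, fsuid, fsgid := instance.TaskIDs(os.Getpid())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	fmt.Printf("uid=%d gid=%d fsuid=%d fsgid=%d\n", uid, gid, fsuid, fsgid)
}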

From f63b9266147ba9ec6903ad18e154e60d86ee49a9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:41:47 +0100
Subject: [PATCH 33/72] lxd/rsync: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/rsync.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lxd/rsync.go b/lxd/rsync.go
index 5bfa3c1423..3f1ebe64ff 100644
--- a/lxd/rsync.go
+++ b/lxd/rsync.go
@@ -13,6 +13,7 @@ import (
 	"github.com/gorilla/websocket"
 	"github.com/pborman/uuid"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
 )
@@ -25,7 +26,7 @@ func rsyncLocalCopy(source string, dest string, bwlimit string, xattrs bool) (st
 	}
 
 	rsyncVerbosity := "-q"
-	if debug {
+	if daemon.Debug {
 		rsyncVerbosity = "-vi"
 	}
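
This only works because daemon.Debug is an ordinary package-level flag that any package may read. A hedged sketch of the publishing side, assuming the value comes from whatever parses the daemon's command line (that wiring is not part of this patch):

// a minimal sketch; applyLogFlags is illustrative, not a function in LXD.
func applyLogFlags(debugFlag bool) {
	// Publishing the value on the daemon package lets helpers such as
	// rsyncLocalCopy branch on it without importing package main.
	daemon.Debug = debugFlag
}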
 

From 2d9e0e309643301cdf1af0c0d2c1d89808473c76 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:42:27 +0100
Subject: [PATCH 34/72] lxd/response: Links SmartError function to operation
 package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/response.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/lxd/response.go b/lxd/response.go
index 7bca1b4e17..c2d8a31ca2 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -25,9 +25,8 @@ import (
 	"github.com/lxc/lxd/shared/version"
 )
 
-type Response interface {
-	Render(w http.ResponseWriter) error
-	String() string
+func init() {
+	operation.SmartError = SmartError
 }
 
 // Sync response
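
The assignment above is the usual Go technique for breaking an import cycle: the operation package cannot import package main, so it exposes a function variable and main fills it in from init(). A plausible sketch of the declaration on the operation side (the real field may differ in detail):

// a minimal sketch of the hook, with a plausible declaration; names and
// signature are inferred from the init() assignment in this patch.
package operation

import "github.com/lxc/lxd/lxd/daemon"

// SmartError is assigned by package main's init() hook, letting the
// operation package build error responses without importing main.
var SmartError func(err error) daemon.Response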

From a141cd633c781c87ee10a942adff0406618cbdeb Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:42:44 +0100
Subject: [PATCH 35/72] lxd/response: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/response.go | 58 +++++++++++++++++++++++++------------------------
 1 file changed, 30 insertions(+), 28 deletions(-)

diff --git a/lxd/response.go b/lxd/response.go
index c2d8a31ca2..a138bfa2fb 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -17,8 +17,10 @@ import (
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -76,7 +78,7 @@ func (r *syncResponse) Render(w http.ResponseWriter) error {
 		Metadata:   r.metadata,
 	}
 
-	return util.WriteJSON(w, resp, debug)
+	return util.WriteJSON(w, resp, daemon.Debug)
 }
 
 func (r *syncResponse) String() string {
@@ -87,23 +89,23 @@ func (r *syncResponse) String() string {
 	return "failure"
 }
 
-func SyncResponse(success bool, metadata interface{}) Response {
+func SyncResponse(success bool, metadata interface{}) daemon.Response {
 	return &syncResponse{success: success, metadata: metadata}
 }
 
-func SyncResponseETag(success bool, metadata interface{}, etag interface{}) Response {
+func SyncResponseETag(success bool, metadata interface{}, etag interface{}) daemon.Response {
 	return &syncResponse{success: success, metadata: metadata, etag: etag}
 }
 
-func SyncResponseLocation(success bool, metadata interface{}, location string) Response {
+func SyncResponseLocation(success bool, metadata interface{}, location string) daemon.Response {
 	return &syncResponse{success: success, metadata: metadata, location: location}
 }
 
-func SyncResponseRedirect(address string) Response {
+func SyncResponseRedirect(address string) daemon.Response {
 	return &syncResponse{success: true, location: address, code: http.StatusPermanentRedirect}
 }
 
-func SyncResponseHeaders(success bool, metadata interface{}, headers map[string]string) Response {
+func SyncResponseHeaders(success bool, metadata interface{}, headers map[string]string) daemon.Response {
 	return &syncResponse{success: success, metadata: metadata, headers: headers}
 }
 
@@ -153,7 +155,7 @@ func (r *forwardedResponse) String() string {
 
 // ForwardedResponse takes a request directed to a node and forwards it to
 // another node, writing back the response it gets.
-func ForwardedResponse(client lxd.InstanceServer, request *http.Request) Response {
+func ForwardedResponse(client lxd.InstanceServer, request *http.Request) daemon.Response {
 	return &forwardedResponse{
 		client:  client,
 		request: request,
@@ -162,7 +164,7 @@ func ForwardedResponse(client lxd.InstanceServer, request *http.Request) Respons
 
 // ForwardedResponseIfTargetIsRemote redirects a request when the request has a
 // targetNode parameter pointing to a node which is not the local one.
-func ForwardedResponseIfTargetIsRemote(d *Daemon, request *http.Request) Response {
+func ForwardedResponseIfTargetIsRemote(d *Daemon, request *http.Request) daemon.Response {
 	targetNode := queryParam(request, "target")
 	if targetNode == "" {
 		return nil
@@ -191,7 +193,7 @@ func ForwardedResponseIfTargetIsRemote(d *Daemon, request *http.Request) Respons
 // ForwardedResponseIfContainerIsRemote redirects a request to the node running
 // the container with the given name. If the container is local, nothing gets
 // done and nil is returned.
-func ForwardedResponseIfContainerIsRemote(d *Daemon, r *http.Request, project, name string, instanceType instance.Type) (Response, error) {
+func ForwardedResponseIfContainerIsRemote(d *Daemon, r *http.Request, project, name string, instanceType instancetype.Type) (daemon.Response, error) {
 	cert := d.endpoints.NetworkCert()
 	client, err := cluster.ConnectIfContainerIsRemote(d.cluster, project, name, cert, instanceType)
 	if err != nil {
@@ -210,7 +212,7 @@ func ForwardedResponseIfContainerIsRemote(d *Daemon, r *http.Request, project, n
 //
 // This is used when no targetNode is specified, and saves users some typing
 // when the volume name/type is unique to a node.
-func ForwardedResponseIfVolumeIsRemote(d *Daemon, r *http.Request, poolID int64, volumeName string, volumeType int) Response {
+func ForwardedResponseIfVolumeIsRemote(d *Daemon, r *http.Request, poolID int64, volumeName string, volumeType int) daemon.Response {
 	if queryParam(r, "target") != "" {
 		return nil
 	}
@@ -335,13 +337,13 @@ func (r *fileResponse) String() string {
 	return fmt.Sprintf("%d files", len(r.files))
 }
 
-func FileResponse(r *http.Request, files []fileResponseEntry, headers map[string]string, removeAfterServe bool) Response {
+func FileResponse(r *http.Request, files []fileResponseEntry, headers map[string]string, removeAfterServe bool) daemon.Response {
 	return &fileResponse{r, files, headers, removeAfterServe}
 }
 
 // Operation response
 type operationResponse struct {
-	op *operation
+	op *operation.Operation
 }
 
 func (r *operationResponse) Render(w http.ResponseWriter) error {
@@ -366,7 +368,7 @@ func (r *operationResponse) Render(w http.ResponseWriter) error {
 	w.Header().Set("Location", url)
 	w.WriteHeader(202)
 
-	return util.WriteJSON(w, body, debug)
+	return util.WriteJSON(w, body, daemon.Debug)
 }
 
 func (r *operationResponse) String() string {
@@ -378,7 +380,7 @@ func (r *operationResponse) String() string {
 	return md.ID
 }
 
-func OperationResponse(op *operation) Response {
+func OperationResponse(op *operation.Operation) daemon.Response {
 	return &operationResponse{op}
 }
 
@@ -407,7 +409,7 @@ func (r *forwardedOperationResponse) Render(w http.ResponseWriter) error {
 	w.Header().Set("Location", url)
 	w.WriteHeader(202)
 
-	return util.WriteJSON(w, body, debug)
+	return util.WriteJSON(w, body, daemon.Debug)
 }
 
 func (r *forwardedOperationResponse) String() string {
@@ -416,7 +418,7 @@ func (r *forwardedOperationResponse) String() string {
 
 // ForwardedOperationResponse creates a response that forwards the metadata of
 // an operation created on another node.
-func ForwardedOperationResponse(project string, op *api.Operation) Response {
+func ForwardedOperationResponse(project string, op *api.Operation) daemon.Response {
 	return &forwardedOperationResponse{
 		op:      op,
 		project: project,
@@ -439,7 +441,7 @@ func (r *errorResponse) Render(w http.ResponseWriter) error {
 	buf := &bytes.Buffer{}
 	output = buf
 	var captured *bytes.Buffer
-	if debug {
+	if daemon.Debug {
 		captured = &bytes.Buffer{}
 		output = io.MultiWriter(buf, captured)
 	}
@@ -450,7 +452,7 @@ func (r *errorResponse) Render(w http.ResponseWriter) error {
 		return err
 	}
 
-	if debug {
+	if daemon.Debug {
 		shared.DebugJson(captured)
 	}
 
@@ -462,7 +464,7 @@ func (r *errorResponse) Render(w http.ResponseWriter) error {
 	return nil
 }
 
-func NotImplemented(err error) Response {
+func NotImplemented(err error) daemon.Response {
 	message := "not implemented"
 	if err != nil {
 		message = err.Error()
@@ -470,7 +472,7 @@ func NotImplemented(err error) Response {
 	return &errorResponse{http.StatusNotImplemented, message}
 }
 
-func NotFound(err error) Response {
+func NotFound(err error) daemon.Response {
 	message := "not found"
 	if err != nil {
 		message = err.Error()
@@ -478,7 +480,7 @@ func NotFound(err error) Response {
 	return &errorResponse{http.StatusNotFound, message}
 }
 
-func Forbidden(err error) Response {
+func Forbidden(err error) daemon.Response {
 	message := "not authorized"
 	if err != nil {
 		message = err.Error()
@@ -486,7 +488,7 @@ func Forbidden(err error) Response {
 	return &errorResponse{http.StatusForbidden, message}
 }
 
-func Conflict(err error) Response {
+func Conflict(err error) daemon.Response {
 	message := "already exists"
 	if err != nil {
 		message = err.Error()
@@ -494,7 +496,7 @@ func Conflict(err error) Response {
 	return &errorResponse{http.StatusConflict, message}
 }
 
-func Unavailable(err error) Response {
+func Unavailable(err error) daemon.Response {
 	message := "unavailable"
 	if err != nil {
 		message = err.Error()
@@ -502,22 +504,22 @@ func Unavailable(err error) Response {
 	return &errorResponse{http.StatusServiceUnavailable, message}
 }
 
-func BadRequest(err error) Response {
+func BadRequest(err error) daemon.Response {
 	return &errorResponse{http.StatusBadRequest, err.Error()}
 }
 
-func InternalError(err error) Response {
+func InternalError(err error) daemon.Response {
 	return &errorResponse{http.StatusInternalServerError, err.Error()}
 }
 
-func PreconditionFailed(err error) Response {
+func PreconditionFailed(err error) daemon.Response {
 	return &errorResponse{http.StatusPreconditionFailed, err.Error()}
 }
 
 /*
  * SmartError returns the right error message based on err.
  */
-func SmartError(err error) Response {
+func SmartError(err error) daemon.Response {
 	if err == nil {
 		return EmptySyncResponse
 	}
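
With every constructor now returning daemon.Response, a handler keeps exactly the shape it had before; only the named type moves. A hedged sketch of a handler in lxd's main package (exampleGet is illustrative, not part of the patch set):

// a minimal sketch of a handler written against the relocated type.
func exampleGet(d *Daemon, r *http.Request) daemon.Response {
	pools, err := d.cluster.StoragePools()
	if err != nil {
		return SmartError(err)
	}

	return SyncResponse(true, pools)
}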

From d030373a48331d022d20b6e9941c93a330af4aed Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:43:11 +0100
Subject: [PATCH 36/72] lxd/resources: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/resources.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/lxd/resources.go b/lxd/resources.go
index fba15ac86f..fd4aa6c3cb 100644
--- a/lxd/resources.go
+++ b/lxd/resources.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/resources"
 )
 
@@ -22,7 +23,7 @@ var storagePoolResourcesCmd = APIEndpoint{
 
 // /1.0/resources
 // Get system resources
-func api10ResourcesGet(d *Daemon, r *http.Request) Response {
+func api10ResourcesGet(d *Daemon, r *http.Request) daemon.Response {
 	// If a target was specified, forward the request to the relevant node.
 	response := ForwardedResponseIfTargetIsRemote(d, r)
 	if response != nil {
@@ -40,7 +41,7 @@ func api10ResourcesGet(d *Daemon, r *http.Request) Response {
 
 // /1.0/storage-pools/{name}/resources
 // Get resources for a specific storage pool
-func storagePoolResourcesGet(d *Daemon, r *http.Request) Response {
+func storagePoolResourcesGet(d *Daemon, r *http.Request) daemon.Response {
 	// If a target was specified, forward the request to the relevant node.
 	response := ForwardedResponseIfTargetIsRemote(d, r)
 	if response != nil {

From 35fe5223f56f26af2ab8560f86199c316c9d8866 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:43:39 +0100
Subject: [PATCH 37/72] lxd/profiles/utils: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/profiles_utils.go | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index de2db4349b..8f76cdb29e 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -7,6 +7,7 @@ import (
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/query"
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/pkg/errors"
@@ -14,13 +15,13 @@ import (
 
 func doProfileUpdate(d *Daemon, project, name string, id int64, profile *api.Profile, req api.ProfilePut) error {
 	// Sanity checks
-	err := containerValidConfig(d.os, req.Config, true, false)
+	err := instance.ContainerValidConfig(d.os, req.Config, true, false)
 	if err != nil {
 		return err
 	}
 
 	// Validate container devices with an empty instanceName to indicate profile validation.
-	err = containerValidDevices(d.State(), d.cluster, "", deviceConfig.NewDevices(req.Devices), false)
+	err = instance.ContainerValidDevices(d.State(), d.cluster, "", deviceConfig.NewDevices(req.Devices), false)
 	if err != nil {
 		return err
 	}
@@ -206,10 +207,10 @@ func doProfileUpdateContainer(d *Daemon, name string, old api.ProfilePut, nodeNa
 		}
 	}
 
-	c := containerLXCInstantiate(d.State(), args)
+	c := instance.ContainerLXCInstantiate(d.State(), args)
 
-	c.expandConfig(profiles)
-	c.expandDevices(profiles)
+	c.ExpandConfig(profiles)
+	c.ExpandDevices(profiles)
 
 	return c.Update(db.ContainerArgs{
 		Architecture: c.Architecture(),

From f6ee330eb3721420415968fa2d47848d6bad2d4b Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:43:54 +0100
Subject: [PATCH 38/72] lxd/profiles: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/profiles.go | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/lxd/profiles.go b/lxd/profiles.go
index 6c6e7c6c17..34004332a5 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -13,8 +13,10 @@ import (
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -39,7 +41,7 @@ var profileCmd = APIEndpoint{
 }
 
 /* This is used for both profiles post and profile put */
-func profilesGet(d *Daemon, r *http.Request) Response {
+func profilesGet(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 
 	recursion := util.IsRecursionRequest(r)
@@ -81,7 +83,7 @@ func profilesGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, result)
 }
 
-func profilesPost(d *Daemon, r *http.Request) Response {
+func profilesPost(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	req := api.ProfilesPost{}
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@@ -101,13 +103,13 @@ func profilesPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Invalid profile name '%s'", req.Name))
 	}
 
-	err := containerValidConfig(d.os, req.Config, true, false)
+	err := instance.ContainerValidConfig(d.os, req.Config, true, false)
 	if err != nil {
 		return BadRequest(err)
 	}
 
 	// Validate container devices with an empty instanceName to indicate profile validation.
-	err = containerValidDevices(d.State(), d.cluster, "", deviceConfig.NewDevices(req.Devices), false)
+	err = instance.ContainerValidDevices(d.State(), d.cluster, "", deviceConfig.NewDevices(req.Devices), false)
 	if err != nil {
 		return BadRequest(err)
 	}
@@ -146,7 +148,7 @@ func profilesPost(d *Daemon, r *http.Request) Response {
 	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/profiles/%s", version.APIVersion, req.Name))
 }
 
-func profileGet(d *Daemon, r *http.Request) Response {
+func profileGet(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	name := mux.Vars(r)["name"]
 
@@ -188,7 +190,7 @@ func profileGet(d *Daemon, r *http.Request) Response {
 	return SyncResponseETag(true, resp, etag)
 }
 
-func profilePut(d *Daemon, r *http.Request) Response {
+func profilePut(d *Daemon, r *http.Request) daemon.Response {
 	// Get the project
 	project := projectParam(r)
 
@@ -268,7 +270,7 @@ func profilePut(d *Daemon, r *http.Request) Response {
 	return SmartError(err)
 }
 
-func profilePatch(d *Daemon, r *http.Request) Response {
+func profilePatch(d *Daemon, r *http.Request) daemon.Response {
 	// Get the project
 	project := projectParam(r)
 
@@ -361,7 +363,7 @@ func profilePatch(d *Daemon, r *http.Request) Response {
 }
 
 // The handler for the post operation.
-func profilePost(d *Daemon, r *http.Request) Response {
+func profilePost(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	name := mux.Vars(r)["name"]
 
@@ -413,7 +415,7 @@ func profilePost(d *Daemon, r *http.Request) Response {
 }
 
 // The handler for the delete operation.
-func profileDelete(d *Daemon, r *http.Request) Response {
+func profileDelete(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	name := mux.Vars(r)["name"]
 

From 3694497ce222ec826502318899607975568d1ad0 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:44:29 +0100
Subject: [PATCH 39/72] lxd/patches: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/patches.go | 49 +++++++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 24 deletions(-)

diff --git a/lxd/patches.go b/lxd/patches.go
index e5253902b4..09cd144e2c 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -17,6 +17,7 @@ import (
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/query"
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
 	log "github.com/lxc/lxd/shared/log15"
@@ -135,12 +136,12 @@ func patchRenameCustomVolumeLVs(name string, d *Daemon) error {
 			return err
 		}
 
-		sType, err := storageStringToType(pool.Driver)
+		sType, err := instance.StorageStringToType(pool.Driver)
 		if err != nil {
 			return err
 		}
 
-		if sType != storageTypeLvm {
+		if sType != instance.StorageTypeLvm {
 			continue
 		}
 
@@ -258,21 +259,21 @@ func patchStorageApi(name string, d *Daemon) error {
 	lvmVgName := daemonConfig["storage.lvm_vg_name"]
 	zfsPoolName := daemonConfig["storage.zfs_pool_name"]
 	defaultPoolName := "default"
-	preStorageApiStorageType := storageTypeDir
+	preStorageApiStorageType := instance.StorageTypeDir
 
 	if lvmVgName != "" {
-		preStorageApiStorageType = storageTypeLvm
+		preStorageApiStorageType = instance.StorageTypeLvm
 		defaultPoolName = lvmVgName
 	} else if zfsPoolName != "" {
-		preStorageApiStorageType = storageTypeZfs
+		preStorageApiStorageType = instance.StorageTypeZfs
 		defaultPoolName = zfsPoolName
 	} else if d.os.BackingFS == "btrfs" {
-		preStorageApiStorageType = storageTypeBtrfs
+		preStorageApiStorageType = instance.StorageTypeBtrfs
 	} else {
 		// Dir storage pool.
 	}
 
-	defaultStorageTypeName, err := storageTypeToString(preStorageApiStorageType)
+	defaultStorageTypeName, err := instance.StorageTypeToString(preStorageApiStorageType)
 	if err != nil {
 		return err
 	}
@@ -321,13 +322,13 @@ func patchStorageApi(name string, d *Daemon) error {
 	// If any of these are actually called, there's no way back.
 	poolName := defaultPoolName
 	switch preStorageApiStorageType {
-	case storageTypeBtrfs:
+	case instance.StorageTypeBtrfs:
 		err = upgradeFromStorageTypeBtrfs(name, d, defaultPoolName, defaultStorageTypeName, cRegular, cSnapshots, imgPublic, imgPrivate)
-	case storageTypeDir:
+	case instance.StorageTypeDir:
 		err = upgradeFromStorageTypeDir(name, d, defaultPoolName, defaultStorageTypeName, cRegular, cSnapshots, imgPublic, imgPrivate)
-	case storageTypeLvm:
+	case instance.StorageTypeLvm:
 		err = upgradeFromStorageTypeLvm(name, d, defaultPoolName, defaultStorageTypeName, cRegular, cSnapshots, imgPublic, imgPrivate)
-	case storageTypeZfs:
+	case instance.StorageTypeZfs:
 		// The user is using a zfs dataset. This case needs to be
 		// handled with care:
 
@@ -1150,7 +1151,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 				}
 
 				// Load the container from the database.
-				ctStruct, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+				ctStruct, err := instance.InstanceLoadByProjectAndName(d.State(), "default", ct)
 				if err != nil {
 					logger.Errorf("Failed to load LVM container %s: %s", ct, err)
 					return err
@@ -1303,7 +1304,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 					}
 
 					// Load the snapshot from the database.
-					csStruct, err := instanceLoadByProjectAndName(d.State(), "default", cs)
+					csStruct, err := instance.InstanceLoadByProjectAndName(d.State(), "default", cs)
 					if err != nil {
 						logger.Errorf("Failed to load LVM container %s: %s", cs, err)
 						return err
@@ -1878,7 +1879,7 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
 
 	// Make sure all containers and snapshots have a valid disk configuration
 	for _, ct := range allcontainers {
-		c, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+		c, err := instance.InstanceLoadByProjectAndName(d.State(), "default", ct)
 		if err != nil {
 			continue
 		}
@@ -1981,7 +1982,7 @@ func patchContainerConfigRegen(name string, d *Daemon) error {
 
 	for _, ct := range cts {
 		// Load the container from the database.
-		c, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+		c, err := instance.InstanceLoadByProjectAndName(d.State(), "default", ct)
 		if err != nil {
 			logger.Errorf("Failed to open container '%s': %v", ct, err)
 			continue
@@ -1991,12 +1992,12 @@ func patchContainerConfigRegen(name string, d *Daemon) error {
 			continue
 		}
 
-		lxcCt, ok := c.(*containerLXC)
+		lxcCt, ok := c.(*instance.ContainerLXC)
 		if !ok {
 			continue
 		}
 
-		err = lxcCt.initLXC(true)
+		err = lxcCt.InitLXC(true)
 		if err != nil {
 			logger.Errorf("Failed to generate LXC config for '%s': %v", ct, err)
 			continue
@@ -2004,7 +2005,7 @@ func patchContainerConfigRegen(name string, d *Daemon) error {
 
 		// Generate the LXC config
 		configPath := filepath.Join(lxcCt.LogPath(), "lxc.conf")
-		err = lxcCt.c.SaveConfigFile(configPath)
+		err = lxcCt.SaveLXCConfigFile(configPath)
 		if err != nil {
 			os.Remove(configPath)
 			logger.Errorf("Failed to save LXC config for '%s': %v", ct, err)
@@ -2760,7 +2761,7 @@ func patchDevicesNewNamingScheme(name string, d *Daemon) error {
 		}
 
 		// Load the container from the database.
-		c, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+		c, err := instance.InstanceLoadByProjectAndName(d.State(), "default", ct)
 		if err != nil {
 			logger.Errorf("Failed to load container %s: %s", ct, err)
 			return err
@@ -2982,7 +2983,7 @@ func patchStorageApiPermissions(name string, d *Daemon) error {
 
 	for _, ct := range cRegular {
 		// load the container from the database
-		ctStruct, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+		ctStruct, err := instance.InstanceLoadByProjectAndName(d.State(), "default", ct)
 		if err != nil {
 			return err
 		}
@@ -3234,17 +3235,17 @@ func patchStorageApiRenameContainerSnapshotsDir(name string, d *Daemon) error {
 				// Disable the read-only properties
 				if hasBtrfs {
 					path := snapshotsDir.Name()
-					subvols, _ := btrfsSubVolumesGet(path)
+					subvols, _ := driver.BTRFSSubVolumesGet(path)
 					for _, subvol := range subvols {
 						subvol = filepath.Join(path, subvol)
 						newSubvol := filepath.Join(shared.VarPath("storage-pools", poolName, "containers-snapshots", entry), subvol)
 
-						if !btrfsSubVolumeIsRo(subvol) {
+						if !driver.BTRFSSubVolumeIsRo(subvol) {
 							continue
 						}
 
-						btrfsSubVolumeMakeRw(subvol)
-						defer btrfsSubVolumeMakeRo(newSubvol)
+						driver.BTRFSSubVolumeMakeRw(subvol)
+						defer driver.BTRFSSubVolumeMakeRo(newSubvol)
 					}
 				}
 

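The storage-type helpers used above are symmetric, so a driver name can be round-tripped through the typed constant. A minimal sketch, assuming the exported helpers keep the behaviour of the old unexported ones:

    // "zfs" -> instance.StorageTypeZfs -> "zfs"
    sType, err := instance.StorageStringToType("zfs")
    if err != nil {
        return err
    }

    name, err := instance.StorageTypeToString(sType)
    if err != nil {
        return err
    }
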
From 455971838693a5b95af1f47b45feef9d660037f5 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:45:08 +0100
Subject: [PATCH 40/72] lxd/operations: Removes functions and types moved to
 operation package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/operations.go | 441 ----------------------------------------------
 1 file changed, 441 deletions(-)

diff --git a/lxd/operations.go b/lxd/operations.go
index d0908e31e3..ffd7bab4e1 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -48,447 +48,6 @@ var operationWebsocket = APIEndpoint{
 	Get: APIEndpointAction{Handler: operationWebsocketGet, AllowUntrusted: true},
 }
 
-var operationsLock sync.Mutex
-var operations map[string]*operation = make(map[string]*operation)
-
-type operationClass int
-
-const (
-	operationClassTask      operationClass = 1
-	operationClassWebsocket operationClass = 2
-	operationClassToken     operationClass = 3
-)
-
-func (t operationClass) String() string {
-	return map[operationClass]string{
-		operationClassTask:      "task",
-		operationClassWebsocket: "websocket",
-		operationClassToken:     "token",
-	}[t]
-}
-
-type operation struct {
-	project     string
-	id          string
-	class       operationClass
-	createdAt   time.Time
-	updatedAt   time.Time
-	status      api.StatusCode
-	url         string
-	resources   map[string][]string
-	metadata    map[string]interface{}
-	err         string
-	readonly    bool
-	canceler    *cancel.Canceler
-	description string
-	permission  string
-
-	// Those functions are called at various points in the operation lifecycle
-	onRun     func(*operation) error
-	onCancel  func(*operation) error
-	onConnect func(*operation, *http.Request, http.ResponseWriter) error
-
-	// Channels used for error reporting and state tracking of background actions
-	chanDone chan error
-
-	// Locking for concurent access to the operation
-	lock sync.Mutex
-
-	cluster *db.Cluster
-}
-
-func (op *operation) done() {
-	if op.readonly {
-		return
-	}
-
-	op.lock.Lock()
-	op.readonly = true
-	op.onRun = nil
-	op.onCancel = nil
-	op.onConnect = nil
-	close(op.chanDone)
-	op.lock.Unlock()
-
-	time.AfterFunc(time.Second*5, func() {
-		operationsLock.Lock()
-		_, ok := operations[op.id]
-		if !ok {
-			operationsLock.Unlock()
-			return
-		}
-
-		delete(operations, op.id)
-		operationsLock.Unlock()
-
-		err := op.cluster.Transaction(func(tx *db.ClusterTx) error {
-			return tx.OperationRemove(op.id)
-		})
-		if err != nil {
-			logger.Warnf("Failed to delete operation %s: %s", op.id, err)
-		}
-	})
-}
-
-func (op *operation) Run() (chan error, error) {
-	if op.status != api.Pending {
-		return nil, fmt.Errorf("Only pending operations can be started")
-	}
-
-	chanRun := make(chan error, 1)
-
-	op.lock.Lock()
-	op.status = api.Running
-
-	if op.onRun != nil {
-		go func(op *operation, chanRun chan error) {
-			err := op.onRun(op)
-			if err != nil {
-				op.lock.Lock()
-				op.status = api.Failure
-				op.err = SmartError(err).String()
-				op.lock.Unlock()
-				op.done()
-				chanRun <- err
-
-				logger.Debugf("Failure for %s operation: %s: %s", op.class.String(), op.id, err)
-
-				_, md, _ := op.Render()
-				eventSend(op.project, "operation", md)
-				return
-			}
-
-			op.lock.Lock()
-			op.status = api.Success
-			op.lock.Unlock()
-			op.done()
-			chanRun <- nil
-
-			op.lock.Lock()
-			logger.Debugf("Success for %s operation: %s", op.class.String(), op.id)
-			_, md, _ := op.Render()
-			eventSend(op.project, "operation", md)
-			op.lock.Unlock()
-		}(op, chanRun)
-	}
-	op.lock.Unlock()
-
-	logger.Debugf("Started %s operation: %s", op.class.String(), op.id)
-	_, md, _ := op.Render()
-	eventSend(op.project, "operation", md)
-
-	return chanRun, nil
-}
-
-func (op *operation) Cancel() (chan error, error) {
-	if op.status != api.Running {
-		return nil, fmt.Errorf("Only running operations can be cancelled")
-	}
-
-	if !op.mayCancel() {
-		return nil, fmt.Errorf("This operation can't be cancelled")
-	}
-
-	chanCancel := make(chan error, 1)
-
-	op.lock.Lock()
-	oldStatus := op.status
-	op.status = api.Cancelling
-	op.lock.Unlock()
-
-	if op.onCancel != nil {
-		go func(op *operation, oldStatus api.StatusCode, chanCancel chan error) {
-			err := op.onCancel(op)
-			if err != nil {
-				op.lock.Lock()
-				op.status = oldStatus
-				op.lock.Unlock()
-				chanCancel <- err
-
-				logger.Debugf("Failed to cancel %s operation: %s: %s", op.class.String(), op.id, err)
-				_, md, _ := op.Render()
-				eventSend(op.project, "operation", md)
-				return
-			}
-
-			op.lock.Lock()
-			op.status = api.Cancelled
-			op.lock.Unlock()
-			op.done()
-			chanCancel <- nil
-
-			logger.Debugf("Cancelled %s operation: %s", op.class.String(), op.id)
-			_, md, _ := op.Render()
-			eventSend(op.project, "operation", md)
-		}(op, oldStatus, chanCancel)
-	}
-
-	logger.Debugf("Cancelling %s operation: %s", op.class.String(), op.id)
-	_, md, _ := op.Render()
-	eventSend(op.project, "operation", md)
-
-	if op.canceler != nil {
-		err := op.canceler.Cancel()
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	if op.onCancel == nil {
-		op.lock.Lock()
-		op.status = api.Cancelled
-		op.lock.Unlock()
-		op.done()
-		chanCancel <- nil
-	}
-
-	logger.Debugf("Cancelled %s operation: %s", op.class.String(), op.id)
-	_, md, _ = op.Render()
-	eventSend(op.project, "operation", md)
-
-	return chanCancel, nil
-}
-
-func (op *operation) Connect(r *http.Request, w http.ResponseWriter) (chan error, error) {
-	if op.class != operationClassWebsocket {
-		return nil, fmt.Errorf("Only websocket operations can be connected")
-	}
-
-	if op.status != api.Running {
-		return nil, fmt.Errorf("Only running operations can be connected")
-	}
-
-	chanConnect := make(chan error, 1)
-
-	op.lock.Lock()
-
-	go func(op *operation, chanConnect chan error) {
-		err := op.onConnect(op, r, w)
-		if err != nil {
-			chanConnect <- err
-
-			logger.Debugf("Failed to handle %s operation: %s: %s", op.class.String(), op.id, err)
-			return
-		}
-
-		chanConnect <- nil
-
-		logger.Debugf("Handled %s operation: %s", op.class.String(), op.id)
-	}(op, chanConnect)
-	op.lock.Unlock()
-
-	logger.Debugf("Connected %s operation: %s", op.class.String(), op.id)
-
-	return chanConnect, nil
-}
-
-func (op *operation) mayCancel() bool {
-	if op.class == operationClassToken {
-		return true
-	}
-
-	if op.onCancel != nil {
-		return true
-	}
-
-	if op.canceler != nil && op.canceler.Cancelable() {
-		return true
-	}
-
-	return false
-}
-
-func (op *operation) Render() (string, *api.Operation, error) {
-	// Setup the resource URLs
-	resources := op.resources
-	if resources != nil {
-		tmpResources := make(map[string][]string)
-		for key, value := range resources {
-			var values []string
-			for _, c := range value {
-				values = append(values, fmt.Sprintf("/%s/%s/%s", version.APIVersion, key, c))
-			}
-			tmpResources[key] = values
-		}
-		resources = tmpResources
-	}
-
-	// Local server name
-	var err error
-	var serverName string
-	err = op.cluster.Transaction(func(tx *db.ClusterTx) error {
-		serverName, err = tx.NodeName()
-		return err
-	})
-	if err != nil {
-		return "", nil, err
-	}
-
-	return op.url, &api.Operation{
-		ID:          op.id,
-		Class:       op.class.String(),
-		Description: op.description,
-		CreatedAt:   op.createdAt,
-		UpdatedAt:   op.updatedAt,
-		Status:      op.status.String(),
-		StatusCode:  op.status,
-		Resources:   resources,
-		Metadata:    op.metadata,
-		MayCancel:   op.mayCancel(),
-		Err:         op.err,
-		Location:    serverName,
-	}, nil
-}
-
-func (op *operation) WaitFinal(timeout int) (bool, error) {
-	// Check current state
-	if op.status.IsFinal() {
-		return true, nil
-	}
-
-	// Wait indefinitely
-	if timeout == -1 {
-		<-op.chanDone
-		return true, nil
-	}
-
-	// Wait until timeout
-	if timeout > 0 {
-		timer := time.NewTimer(time.Duration(timeout) * time.Second)
-		select {
-		case <-op.chanDone:
-			return true, nil
-
-		case <-timer.C:
-			return false, nil
-		}
-	}
-
-	return false, nil
-}
-
-func (op *operation) UpdateResources(opResources map[string][]string) error {
-	if op.status != api.Pending && op.status != api.Running {
-		return fmt.Errorf("Only pending or running operations can be updated")
-	}
-
-	if op.readonly {
-		return fmt.Errorf("Read-only operations can't be updated")
-	}
-
-	op.lock.Lock()
-	op.updatedAt = time.Now()
-	op.resources = opResources
-	op.lock.Unlock()
-
-	logger.Debugf("Updated resources for %s operation: %s", op.class.String(), op.id)
-	_, md, _ := op.Render()
-	eventSend(op.project, "operation", md)
-
-	return nil
-}
-
-func (op *operation) UpdateMetadata(opMetadata interface{}) error {
-	if op.status != api.Pending && op.status != api.Running {
-		return fmt.Errorf("Only pending or running operations can be updated")
-	}
-
-	if op.readonly {
-		return fmt.Errorf("Read-only operations can't be updated")
-	}
-
-	newMetadata, err := shared.ParseMetadata(opMetadata)
-	if err != nil {
-		return err
-	}
-
-	op.lock.Lock()
-	op.updatedAt = time.Now()
-	op.metadata = newMetadata
-	op.lock.Unlock()
-
-	logger.Debugf("Updated metadata for %s operation: %s", op.class.String(), op.id)
-	_, md, _ := op.Render()
-	eventSend(op.project, "operation", md)
-
-	return nil
-}
-
-func operationCreate(cluster *db.Cluster, project string, opClass operationClass, opType db.OperationType, opResources map[string][]string, opMetadata interface{}, onRun func(*operation) error, onCancel func(*operation) error, onConnect func(*operation, *http.Request, http.ResponseWriter) error) (*operation, error) {
-	// Main attributes
-	op := operation{}
-	op.project = project
-	op.id = uuid.NewRandom().String()
-	op.description = opType.Description()
-	op.permission = opType.Permission()
-	op.class = opClass
-	op.createdAt = time.Now()
-	op.updatedAt = op.createdAt
-	op.status = api.Pending
-	op.url = fmt.Sprintf("/%s/operations/%s", version.APIVersion, op.id)
-	op.resources = opResources
-	op.chanDone = make(chan error)
-	op.cluster = cluster
-
-	newMetadata, err := shared.ParseMetadata(opMetadata)
-	if err != nil {
-		return nil, err
-	}
-	op.metadata = newMetadata
-
-	// Callback functions
-	op.onRun = onRun
-	op.onCancel = onCancel
-	op.onConnect = onConnect
-
-	// Sanity check
-	if op.class != operationClassWebsocket && op.onConnect != nil {
-		return nil, fmt.Errorf("Only websocket operations can have a Connect hook")
-	}
-
-	if op.class == operationClassWebsocket && op.onConnect == nil {
-		return nil, fmt.Errorf("Websocket operations must have a Connect hook")
-	}
-
-	if op.class == operationClassToken && op.onRun != nil {
-		return nil, fmt.Errorf("Token operations can't have a Run hook")
-	}
-
-	if op.class == operationClassToken && op.onCancel != nil {
-		return nil, fmt.Errorf("Token operations can't have a Cancel hook")
-	}
-
-	operationsLock.Lock()
-	operations[op.id] = &op
-	operationsLock.Unlock()
-
-	err = op.cluster.Transaction(func(tx *db.ClusterTx) error {
-		_, err := tx.OperationAdd(project, op.id, opType)
-		return err
-	})
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to add operation %s to database", op.id)
-	}
-
-	logger.Debugf("New %s operation: %s", op.class.String(), op.id)
-	_, md, _ := op.Render()
-	eventSend(op.project, "operation", md)
-
-	return &op, nil
-}
-
-func operationGetInternal(id string) (*operation, error) {
-	operationsLock.Lock()
-	op, ok := operations[id]
-	operationsLock.Unlock()
-
-	if !ok {
-		return nil, fmt.Errorf("Operation '%s' doesn't exist", id)
-	}
-
-	return op, nil
-}
-
 // API functions
 func operationGet(d *Daemon, r *http.Request) Response {
 	id := mux.Vars(r)["id"]

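For reference, the lifecycle of the removed type is unchanged in its new home: an operation starts out Pending, Run() flips it to Running and fires the onRun hook in a goroutine, and done() marks it read-only and schedules its deletion five seconds later. A minimal sketch of driving a task operation from the moved package (OperationCreate and OperationClassTask are assumed to mirror the unexported names removed here):

    op, err := operation.OperationCreate(d.cluster, project,
        operation.OperationClassTask, opType, nil, nil, run, nil, nil)
    if err != nil {
        return err
    }

    // Kick off the background action.
    _, err = op.Run()
    if err != nil {
        return err
    }

    // Block for up to 30 seconds; -1 would wait indefinitely.
    done, err := op.WaitFinal(30)
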
From 5c31adefc2a28b81b4390c10b368af6e7c891773 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:45:31 +0100
Subject: [PATCH 41/72] lxd/operations: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/operations.go | 56 ++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 30 deletions(-)

diff --git a/lxd/operations.go b/lxd/operations.go
index ffd7bab4e1..54cf7aac30 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -4,23 +4,19 @@ import (
 	"fmt"
 	"net/http"
 	"strings"
-	"sync"
-	"time"
 
 	"github.com/gorilla/mux"
 	"github.com/gorilla/websocket"
-	"github.com/pborman/uuid"
-	"github.com/pkg/errors"
 
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/cancel"
 	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/version"
 )
 
 var operationCmd = APIEndpoint{
@@ -49,13 +45,13 @@ var operationWebsocket = APIEndpoint{
 }
 
 // API functions
-func operationGet(d *Daemon, r *http.Request) Response {
+func operationGet(d *Daemon, r *http.Request) daemon.Response {
 	id := mux.Vars(r)["id"]
 
 	var body *api.Operation
 
 	// First check if the query is for a local operation from this node
-	op, err := operationGetInternal(id)
+	op, err := operation.OperationGetInternal(id)
 	if err == nil {
 		_, body, err = op.Render()
 		if err != nil {
@@ -94,19 +90,19 @@ func operationGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, body)
 }
 
-func operationDelete(d *Daemon, r *http.Request) Response {
+func operationDelete(d *Daemon, r *http.Request) daemon.Response {
 	id := mux.Vars(r)["id"]
 
 	// First check if the query is for a local operation from this node
-	op, err := operationGetInternal(id)
+	op, err := operation.OperationGetInternal(id)
 	if err == nil {
-		if op.permission != "" {
-			project := op.project
+		if op.Permission != "" {
+			project := op.Project
 			if project == "" {
 				project = "default"
 			}
 
-			if !d.userHasPermission(r, project, op.permission) {
+			if !d.userHasPermission(r, project, op.Permission) {
 				return Forbidden(nil)
 			}
 		}
@@ -148,30 +144,30 @@ func operationDelete(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func operationsGet(d *Daemon, r *http.Request) Response {
+func operationsGet(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	recursion := util.IsRecursionRequest(r)
 
 	localOperationURLs := func() (shared.Jmap, error) {
 		// Get all the operations
-		operationsLock.Lock()
-		ops := operations
-		operationsLock.Unlock()
+		operation.OperationsLock.Lock()
+		ops := operation.Operations
+		operation.OperationsLock.Unlock()
 
 		// Build a list of URLs
 		body := shared.Jmap{}
 
 		for _, v := range ops {
-			if v.project != "" && v.project != project {
+			if v.Project != "" && v.Project != project {
 				continue
 			}
-			status := strings.ToLower(v.status.String())
+			status := strings.ToLower(v.Status.String())
 			_, ok := body[status]
 			if !ok {
 				body[status] = make([]string, 0)
 			}
 
-			body[status] = append(body[status].([]string), v.url)
+			body[status] = append(body[status].([]string), v.URL)
 		}
 
 		return body, nil
@@ -179,18 +175,18 @@ func operationsGet(d *Daemon, r *http.Request) Response {
 
 	localOperations := func() (shared.Jmap, error) {
 		// Get all the operations
-		operationsLock.Lock()
-		ops := operations
-		operationsLock.Unlock()
+		operation.OperationsLock.Lock()
+		ops := operation.Operations
+		operation.OperationsLock.Unlock()
 
 		// Build a list of operations
 		body := shared.Jmap{}
 
 		for _, v := range ops {
-			if v.project != "" && v.project != project {
+			if v.Project != "" && v.Project != project {
 				continue
 			}
-			status := strings.ToLower(v.status.String())
+			status := strings.ToLower(v.Status.String())
 			_, ok := body[status]
 			if !ok {
 				body[status] = make([]*api.Operation, 0)
@@ -320,7 +316,7 @@ func operationsGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, md)
 }
 
-func operationWaitGet(d *Daemon, r *http.Request) Response {
+func operationWaitGet(d *Daemon, r *http.Request) daemon.Response {
 	id := mux.Vars(r)["id"]
 
 	timeout, err := shared.AtoiEmptyDefault(r.FormValue("timeout"), -1)
@@ -329,7 +325,7 @@ func operationWaitGet(d *Daemon, r *http.Request) Response {
 	}
 
 	// First check if the query is for a local operation from this node
-	op, err := operationGetInternal(id)
+	op, err := operation.OperationGetInternal(id)
 	if err == nil {
 		_, err = op.WaitFinal(timeout)
 		if err != nil {
@@ -375,7 +371,7 @@ func operationWaitGet(d *Daemon, r *http.Request) Response {
 
 type operationWebSocket struct {
 	req *http.Request
-	op  *operation
+	op  *operation.Operation
 }
 
 func (r *operationWebSocket) Render(w http.ResponseWriter) error {
@@ -416,11 +412,11 @@ func (r *forwardedOperationWebSocket) String() string {
 	return r.id
 }
 
-func operationWebsocketGet(d *Daemon, r *http.Request) Response {
+func operationWebsocketGet(d *Daemon, r *http.Request) daemon.Response {
 	id := mux.Vars(r)["id"]
 
 	// First check if the query is for a local operation from this node
-	op, err := operationGetInternal(id)
+	op, err := operation.OperationGetInternal(id)
 	if err == nil {
 		return &operationWebSocket{r, op}
 	}

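With the registry exported, other packages read it the same way the handlers above do: hold operation.OperationsLock around any access to operation.Operations. An illustrative sketch counting the running operations for a project:

    operation.OperationsLock.Lock()
    running := 0
    for _, op := range operation.Operations {
        if op.Project == project && op.Status == api.Running {
            running++
        }
    }
    operation.OperationsLock.Unlock()
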
From 936c33ab053cafd624e25572715e526483eefb0e Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:46:39 +0100
Subject: [PATCH 42/72] lxd/devices: Removes functions and vars moved to other
 packages

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/devices.go | 132 -------------------------------------------------
 1 file changed, 132 deletions(-)

diff --git a/lxd/devices.go b/lxd/devices.go
index aeecf4a69a..07906f9a38 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -22,8 +22,6 @@ import (
 	log "github.com/lxc/lxd/shared/log15"
 )
 
-var deviceSchedRebalance = make(chan []string, 2)
-
 type deviceTaskCPU struct {
 	id    int
 	strId string
@@ -491,133 +489,3 @@ func deviceEventListener(s *state.State) {
 		}
 	}
 }
-
-// devicesRegister calls the Register() function on all supported devices so they receive events.
-func devicesRegister(s *state.State) {
-	instances, err := instanceLoadNodeAll(s)
-	if err != nil {
-		logger.Error("Problem loading containers list", log.Ctx{"err": err})
-		return
-	}
-
-	for _, instanceIf := range instances {
-		c, ok := instanceIf.(*containerLXC)
-		if !ok {
-			logger.Errorf("Instance is not container type")
-			continue
-		}
-
-		if !c.IsRunning() {
-			continue
-		}
-
-		devices := c.ExpandedDevices()
-		for _, dev := range devices.Sorted() {
-			d, _, err := c.deviceLoad(dev.Name, dev.Config)
-			if err == device.ErrUnsupportedDevType {
-				continue
-			}
-
-			if err != nil {
-				logger.Error("Failed to load device to register", log.Ctx{"err": err, "container": c.Name(), "device": dev.Name})
-				continue
-			}
-
-			// Check whether device wants to register for any events.
-			err = d.Register()
-			if err != nil {
-				logger.Error("Failed to register device", log.Ctx{"err": err, "container": c.Name(), "device": dev.Name})
-				continue
-			}
-		}
-	}
-}
-
-func deviceTaskSchedulerTrigger(srcType string, srcName string, srcStatus string) {
-	// Spawn a go routine which then triggers the scheduler
-	select {
-	case deviceSchedRebalance <- []string{srcType, srcName, srcStatus}:
-	default:
-		// Channel is full, drop the event
-	}
-}
-
-func deviceNextInterfaceHWAddr() (string, error) {
-	// Generate a new random MAC address using the usual prefix
-	ret := bytes.Buffer{}
-	for _, c := range "00:16:3e:xx:xx:xx" {
-		if c == 'x' {
-			c, err := rand.Int(rand.Reader, big.NewInt(16))
-			if err != nil {
-				return "", err
-			}
-			ret.WriteString(fmt.Sprintf("%x", c.Int64()))
-		} else {
-			ret.WriteString(string(c))
-		}
-	}
-
-	return ret.String(), nil
-}
-
-func deviceParseCPU(cpuAllowance string, cpuPriority string) (string, string, string, error) {
-	var err error
-
-	// Parse priority
-	cpuShares := 0
-	cpuPriorityInt := 10
-	if cpuPriority != "" {
-		cpuPriorityInt, err = strconv.Atoi(cpuPriority)
-		if err != nil {
-			return "", "", "", err
-		}
-	}
-	cpuShares -= 10 - cpuPriorityInt
-
-	// Parse allowance
-	cpuCfsQuota := "-1"
-	cpuCfsPeriod := "100000"
-
-	if cpuAllowance != "" {
-		if strings.HasSuffix(cpuAllowance, "%") {
-			// Percentage based allocation
-			percent, err := strconv.Atoi(strings.TrimSuffix(cpuAllowance, "%"))
-			if err != nil {
-				return "", "", "", err
-			}
-
-			cpuShares += (10 * percent) + 24
-		} else {
-			// Time based allocation
-			fields := strings.SplitN(cpuAllowance, "/", 2)
-			if len(fields) != 2 {
-				return "", "", "", fmt.Errorf("Invalid allowance: %s", cpuAllowance)
-			}
-
-			quota, err := strconv.Atoi(strings.TrimSuffix(fields[0], "ms"))
-			if err != nil {
-				return "", "", "", err
-			}
-
-			period, err := strconv.Atoi(strings.TrimSuffix(fields[1], "ms"))
-			if err != nil {
-				return "", "", "", err
-			}
-
-			// Set limit in ms
-			cpuCfsQuota = fmt.Sprintf("%d", quota*1000)
-			cpuCfsPeriod = fmt.Sprintf("%d", period*1000)
-			cpuShares += 1024
-		}
-	} else {
-		// Default is 100%
-		cpuShares += 1024
-	}
-
-	// Deal with a potential negative score
-	if cpuShares < 0 {
-		cpuShares = 0
-	}
-
-	return fmt.Sprintf("%d", cpuShares), cpuCfsQuota, cpuCfsPeriod, nil
-}

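The removed deviceParseCPU encoded two allowance syntaxes: a percentage, which only adjusts the CPU shares, and a "<quota>ms/<period>ms" pair, which sets the CFS quota and period (converted to microseconds) and leaves shares at the default. Assuming the function keeps its signature wherever it now lives, the arithmetic works out as:

    // shares, quota, period
    deviceParseCPU("", "")           // "1024", "-1",    "100000"
    deviceParseCPU("50%", "")        // "524",  "-1",    "100000"  (10*50 + 24)
    deviceParseCPU("25ms/100ms", "") // "1024", "25000", "100000"  (ms -> us)
    deviceParseCPU("", "5")          // "1019", "-1",    "100000"  (1024 - (10-5))
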
From d125fc70215da2722c7f9a2e419ec0c95e0ef056 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:47:00 +0100
Subject: [PATCH 43/72] lxd/devices: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/devices.go | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/lxd/devices.go b/lxd/devices.go
index 07906f9a38..138ae6be6d 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -1,11 +1,8 @@
 package main
 
 import (
-	"bytes"
-	"crypto/rand"
 	"fmt"
 	"io/ioutil"
-	"math/big"
 	"os"
 	"path"
 	"sort"
@@ -15,6 +12,7 @@ import (
 	"golang.org/x/sys/unix"
 
 	"github.com/lxc/lxd/lxd/device"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
@@ -292,14 +290,14 @@ func deviceTaskBalance(s *state.State) {
 	}
 
 	// Iterate through the instances
-	instances, err := instanceLoadNodeAll(s)
+	instances, err := instance.InstanceLoadNodeAll(s)
 	if err != nil {
 		logger.Error("Problem loading instances list", log.Ctx{"err": err})
 		return
 	}
 
-	fixedInstances := map[int][]Instance{}
-	balancedInstances := map[Instance]int{}
+	fixedInstances := map[int][]instance.Instance{}
+	balancedInstances := map[instance.Instance]int{}
 	for _, c := range instances {
 		conf := c.ExpandedConfig()
 		cpulimit, ok := conf["limits.cpu"]
@@ -331,14 +329,14 @@ func deviceTaskBalance(s *state.State) {
 				if ok {
 					fixedInstances[nr] = append(fixedInstances[nr], c)
 				} else {
-					fixedInstances[nr] = []Instance{c}
+					fixedInstances[nr] = []instance.Instance{c}
 				}
 			}
 		}
 	}
 
 	// Balance things
-	pinning := map[Instance][]string{}
+	pinning := map[instance.Instance][]string{}
 	usage := map[int]deviceTaskCPU{}
 
 	for _, id := range cpus {
@@ -414,7 +412,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
 		return
 	}
 
-	instances, err := instanceLoadNodeAll(s)
+	instances, err := instance.InstanceLoadNodeAll(s)
 	if err != nil {
 		return
 	}
@@ -474,7 +472,7 @@ func deviceEventListener(s *state.State) {
 			networkAutoAttach(s.Cluster, e[0])
 		case e := <-chUSB:
 			device.USBRunHandlers(s, &e)
-		case e := <-deviceSchedRebalance:
+		case e := <-device.DeviceSchedRebalance:
 			if len(e) != 3 {
 				logger.Errorf("Scheduler: received an invalid rebalance event")
 				continue

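A device that wants a CPU rebalance now sends on the exported channel directly; the non-blocking send from the removed deviceTaskSchedulerTrigger carries over unchanged:

    select {
    case device.DeviceSchedRebalance <- []string{srcType, srcName, srcStatus}:
    default:
        // Buffer full: a rebalance is already queued, drop the event.
    }
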
From c39d042020a034f6a8e8348c1710ddb6f7ddca8e Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:47:48 +0100
Subject: [PATCH 44/72] lxd/device/device/utils/network: Adds
 NetworkNextInterfaceHWAddr

Moved from the main package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/device/device_utils_network.go | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/lxd/device/device_utils_network.go b/lxd/device/device_utils_network.go
index 073d1720e8..81e2a4b0aa 100644
--- a/lxd/device/device_utils_network.go
+++ b/lxd/device/device_utils_network.go
@@ -2,10 +2,12 @@ package device
 
 import (
 	"bufio"
+	"bytes"
 	"crypto/rand"
 	"encoding/hex"
 	"fmt"
 	"io/ioutil"
+	"math/big"
 	"net"
 	"os"
 	"regexp"
@@ -694,3 +696,22 @@ func networkParsePortRange(r string) (int64, int64, error) {
 
 	return base, size, nil
 }
+
+// NetworkNextInterfaceHWAddr generates an Ethernet MAC address with a "00:16:3e" prefix.
+func NetworkNextInterfaceHWAddr() (string, error) {
+	// Generate a new random MAC address using the usual prefix.
+	ret := bytes.Buffer{}
+	for _, c := range "00:16:3e:xx:xx:xx" {
+		if c == 'x' {
+			c, err := rand.Int(rand.Reader, big.NewInt(16))
+			if err != nil {
+				return "", err
+			}
+			ret.WriteString(fmt.Sprintf("%x", c.Int64()))
+		} else {
+			ret.WriteString(string(c))
+		}
+	}
+
+	return ret.String(), nil
+}

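A short usage sketch for the new helper (the volatile map shown is illustrative); each call yields a fresh address under the usual LXD prefix, e.g. "00:16:3e:4a:9f:1c":

    hwaddr, err := device.NetworkNextInterfaceHWAddr()
    if err != nil {
        return err
    }
    volatile["hwaddr"] = hwaddr
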
From 7085dfeb6ef8487ee8921f1f118a6aebdc71ccee Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:48:09 +0100
Subject: [PATCH 45/72] lxd/device*: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/device/device_instance_id.go  | 4 ++--
 lxd/device/disk.go                | 4 ++--
 lxd/device/gpu.go                 | 4 ++--
 lxd/device/infiniband_physical.go | 4 ++--
 lxd/device/infiniband_sriov.go    | 4 ++--
 lxd/device/nic_bridged.go         | 4 ++--
 lxd/device/nic_ipvlan.go          | 4 ++--
 lxd/device/nic_macvlan.go         | 4 ++--
 lxd/device/nic_p2p.go             | 4 ++--
 lxd/device/nic_physical.go        | 4 ++--
 lxd/device/nic_sriov.go           | 4 ++--
 lxd/device/proxy.go               | 4 ++--
 lxd/device/unix_common.go         | 4 ++--
 lxd/device/usb.go                 | 4 ++--
 14 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/lxd/device/device_instance_id.go b/lxd/device/device_instance_id.go
index c92bb6ae95..4cf76ee89e 100644
--- a/lxd/device/device_instance_id.go
+++ b/lxd/device/device_instance_id.go
@@ -2,7 +2,7 @@ package device
 
 import (
 	"github.com/lxc/lxd/lxd/device/config"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 )
 
 // InstanceIdentifier is an interface that allows us to identify an Instance and its properties.
@@ -10,7 +10,7 @@ import (
 // independent of when they're called in the instance lifecycle.
 type InstanceIdentifier interface {
 	Name() string
-	Type() instance.Type
+	Type() instancetype.Type
 	Project() string
 	DevicesPath() string
 	RootfsPath() string
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index 6866f3f032..30745f67b6 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -14,7 +14,7 @@ import (
 
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/device/config"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
@@ -44,7 +44,7 @@ func (d *disk) isRequired() bool {
 
 // validateConfig checks the supplied config for correctness.
 func (d *disk) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/gpu.go b/lxd/device/gpu.go
index fdcfa69846..0d4f44b82d 100644
--- a/lxd/device/gpu.go
+++ b/lxd/device/gpu.go
@@ -11,7 +11,7 @@ import (
 
 	"golang.org/x/sys/unix"
 
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/resources"
 	"github.com/lxc/lxd/shared"
 )
@@ -31,7 +31,7 @@ type gpu struct {
 
 // validateConfig checks the supplied config for correctness.
 func (d *gpu) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/infiniband_physical.go b/lxd/device/infiniband_physical.go
index 9c181be6e8..77cd035c50 100644
--- a/lxd/device/infiniband_physical.go
+++ b/lxd/device/infiniband_physical.go
@@ -3,7 +3,7 @@ package device
 import (
 	"fmt"
 
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/resources"
 	"github.com/lxc/lxd/shared"
 )
@@ -14,7 +14,7 @@ type infinibandPhysical struct {
 
 // validateConfig checks the supplied config for correctness.
 func (d *infinibandPhysical) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/infiniband_sriov.go b/lxd/device/infiniband_sriov.go
index 6d96b0c4aa..4ac6594dd4 100644
--- a/lxd/device/infiniband_sriov.go
+++ b/lxd/device/infiniband_sriov.go
@@ -3,7 +3,7 @@ package device
 import (
 	"fmt"
 
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/resources"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -15,7 +15,7 @@ type infinibandSRIOV struct {
 
 // validateConfig checks the supplied config for correctness.
 func (d *infinibandSRIOV) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/nic_bridged.go b/lxd/device/nic_bridged.go
index f1e834d263..fde042545b 100644
--- a/lxd/device/nic_bridged.go
+++ b/lxd/device/nic_bridged.go
@@ -21,7 +21,7 @@ import (
 
 	"github.com/lxc/lxd/lxd/device/config"
 	"github.com/lxc/lxd/lxd/dnsmasq"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/iptables"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
@@ -47,7 +47,7 @@ type nicBridged struct {
 
 // validateConfig checks the supplied config for correctness.
 func (d *nicBridged) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/nic_ipvlan.go b/lxd/device/nic_ipvlan.go
index 66b547c12b..226d8aa7d3 100644
--- a/lxd/device/nic_ipvlan.go
+++ b/lxd/device/nic_ipvlan.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -18,7 +18,7 @@ func (d *nicIPVLAN) CanHotPlug() (bool, []string) {
 
 // validateConfig checks the supplied config for correctness.
 func (d *nicIPVLAN) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/nic_macvlan.go b/lxd/device/nic_macvlan.go
index 60f731f52e..e5f92565aa 100644
--- a/lxd/device/nic_macvlan.go
+++ b/lxd/device/nic_macvlan.go
@@ -3,7 +3,7 @@ package device
 import (
 	"fmt"
 
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -13,7 +13,7 @@ type nicMACVLAN struct {
 
 // validateConfig checks the supplied config for correctness.
 func (d *nicMACVLAN) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/nic_p2p.go b/lxd/device/nic_p2p.go
index 35b7d7e815..007975f638 100644
--- a/lxd/device/nic_p2p.go
+++ b/lxd/device/nic_p2p.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 
 	"github.com/lxc/lxd/lxd/device/config"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -14,7 +14,7 @@ type nicP2P struct {
 
 // validateConfig checks the supplied config for correctness.
 func (d *nicP2P) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/nic_physical.go b/lxd/device/nic_physical.go
index 41e8ce4b90..1a935518cc 100644
--- a/lxd/device/nic_physical.go
+++ b/lxd/device/nic_physical.go
@@ -3,7 +3,7 @@ package device
 import (
 	"fmt"
 
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -13,7 +13,7 @@ type nicPhysical struct {
 
 // validateConfig checks the supplied config for correctness.
 func (d *nicPhysical) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/nic_sriov.go b/lxd/device/nic_sriov.go
index c63401acfa..c6b51980fc 100644
--- a/lxd/device/nic_sriov.go
+++ b/lxd/device/nic_sriov.go
@@ -13,7 +13,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -23,7 +23,7 @@ type nicSRIOV struct {
 
 // validateConfig checks the supplied config for correctness.
 func (d *nicSRIOV) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/proxy.go b/lxd/device/proxy.go
index 2efd0f9b09..ab58dfe335 100644
--- a/lxd/device/proxy.go
+++ b/lxd/device/proxy.go
@@ -15,7 +15,7 @@ import (
 	"golang.org/x/sys/unix"
 	"gopkg.in/lxc/go-lxc.v2"
 
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/iptables"
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/shared"
@@ -40,7 +40,7 @@ type proxyProcInfo struct {
 
 // validateConfig checks the supplied config for correctness.
 func (d *proxy) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/unix_common.go b/lxd/device/unix_common.go
index 09ae89dcdf..85ee0a3cfe 100644
--- a/lxd/device/unix_common.go
+++ b/lxd/device/unix_common.go
@@ -6,7 +6,7 @@ import (
 	"strings"
 
 	"github.com/lxc/lxd/lxd/device/config"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -39,7 +39,7 @@ func (d *unixCommon) isRequired() bool {
 
 // validateConfig checks the supplied config for correctness.
 func (d *unixCommon) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 
diff --git a/lxd/device/usb.go b/lxd/device/usb.go
index 4a1ad538f5..3e8b01a558 100644
--- a/lxd/device/usb.go
+++ b/lxd/device/usb.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 
 	"github.com/lxc/lxd/lxd/device/config"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -43,7 +43,7 @@ func (d *usb) isRequired() bool {
 
 // validateConfig checks the supplied config for correctness.
 func (d *usb) validateConfig() error {
-	if d.instance.Type() != instance.TypeContainer {
+	if d.instance.Type() != instancetype.Container {
 		return ErrUnsupportedDevType
 	}
 

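The split matters because instancetype carries only the type constants, so the device package can depend on it without importing the much larger instance package. A sketch of the assumed contents (only Container is referenced in this series):

    package instancetype

    // Type indicates the kind of instance.
    type Type int

    const (
        // Any represents any instance type.
        Any Type = iota - 1

        // Container represents a container instance.
        Container
    )
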
From 83c17712b8c100e1327b0c1b3c5f98e0895d3f53 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:48:48 +0100
Subject: [PATCH 46/72] lxd/networks/utils: Links networkUpdateStatic to
 instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/networks_utils.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index f6a939aef2..64bfa182ae 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -33,6 +33,10 @@ import (
 	"github.com/lxc/lxd/shared/logger"
 )
 
+func init() {
+	instance.NetworkUpdateStatic = networkUpdateStatic
+}
+
 var forkdnsServersLock sync.Mutex
 
 func networkAutoAttach(cluster *db.Cluster, devName string) error {

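This init() wiring breaks an import cycle: instance code needs to trigger static network regeneration, but the implementation lives in main, which already imports instance. The counterpart declaration isn't part of this patch, but on the instance side it would look roughly like:

    // NetworkUpdateStatic is assigned by the main package at startup,
    // letting instance code call back into main without importing it.
    var NetworkUpdateStatic func(s *state.State, networkName string) error
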
From 87611c0a220c2a80f224081ad41b9b00640cc398 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:49:04 +0100
Subject: [PATCH 47/72] lxd/networks/utils: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/networks_utils.go | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index 64bfa182ae..f6ecd725bf 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -26,6 +26,7 @@ import (
 	"github.com/lxc/lxd/lxd/device"
 	"github.com/lxc/lxd/lxd/dnsmasq"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
@@ -94,7 +95,7 @@ func networkGetInterfaces(cluster *db.Cluster) ([]string, error) {
 	return networks, nil
 }
 
-func networkIsInUse(c Instance, name string) bool {
+func networkIsInUse(c instance.Instance, name string) bool {
 	for _, d := range c.ExpandedDevices() {
 		if d["type"] != "nic" {
 			continue
@@ -643,7 +644,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 	}
 
 	// Get all the instances
-	insts, err := instanceLoadNodeAll(s)
+	insts, err := instance.InstanceLoadNodeAll(s)
 	if err != nil {
 		return err
 	}
@@ -658,9 +659,9 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 				continue
 			}
 
-			if inst.Type() == instance.TypeContainer {
+			if inst.Type() == instancetype.Container {
 				// Fill in the hwaddr from volatile
-				d, err = inst.(*containerLXC).fillNetworkDevice(k, d)
+				d, err = inst.(*instance.ContainerLXC).FillNetworkDevice(k, d)
 				if err != nil {
 					continue
 				}

From 6e0518450de11fd5c87a4e219744e828be08acc5 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:49:26 +0100
Subject: [PATCH 48/72] lxd/networks: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/networks.go | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/lxd/networks.go b/lxd/networks.go
index 4058127e7a..b13be809e2 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -19,10 +19,12 @@ import (
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/device"
 	"github.com/lxc/lxd/lxd/dnsmasq"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/iptables"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
@@ -66,7 +68,7 @@ var networkStateCmd = APIEndpoint{
 }
 
 // API endpoints
-func networksGet(d *Daemon, r *http.Request) Response {
+func networksGet(d *Daemon, r *http.Request) daemon.Response {
 	recursion := util.IsRecursionRequest(r)
 
 	ifs, err := networkGetInterfaces(d.cluster)
@@ -95,7 +97,7 @@ func networksGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, resultMap)
 }
 
-func networksPost(d *Daemon, r *http.Request) Response {
+func networksPost(d *Daemon, r *http.Request) daemon.Response {
 	networkCreateLock.Lock()
 	defer networkCreateLock.Unlock()
 
@@ -348,7 +350,7 @@ func doNetworksCreate(d *Daemon, req api.NetworksPost, withDatabase bool) error
 	return nil
 }
 
-func networkGet(d *Daemon, r *http.Request) Response {
+func networkGet(d *Daemon, r *http.Request) daemon.Response {
 	// If a target was specified, forward the request to the relevant node.
 	response := ForwardedResponseIfTargetIsRemote(d, r)
 	if response != nil {
@@ -454,7 +456,7 @@ func doNetworkGet(d *Daemon, name string) (api.Network, error) {
 	return n, nil
 }
 
-func networkDelete(d *Daemon, r *http.Request) Response {
+func networkDelete(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 	state := d.State()
 
@@ -514,7 +516,7 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func networkPost(d *Daemon, r *http.Request) Response {
+func networkPost(d *Daemon, r *http.Request) daemon.Response {
 	// FIXME: renaming a network is currently not supported in clustering
 	//        mode. The difficulty is that network.Start() depends on the
 	//        network having already been renamed in the database, which is
@@ -575,7 +577,7 @@ func networkPost(d *Daemon, r *http.Request) Response {
 	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/networks/%s", version.APIVersion, req.Name))
 }
 
-func networkPut(d *Daemon, r *http.Request) Response {
+func networkPut(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 
 	// Get the existing network
@@ -614,7 +616,7 @@ func networkPut(d *Daemon, r *http.Request) Response {
 	return doNetworkUpdate(d, name, dbInfo.Config, req)
 }
 
-func networkPatch(d *Daemon, r *http.Request) Response {
+func networkPatch(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 
 	// Get the existing network
@@ -665,7 +667,7 @@ func networkPatch(d *Daemon, r *http.Request) Response {
 	return doNetworkUpdate(d, name, dbInfo.Config, req)
 }
 
-func doNetworkUpdate(d *Daemon, name string, oldConfig map[string]string, req api.NetworkPut) Response {
+func doNetworkUpdate(d *Daemon, name string, oldConfig map[string]string, req api.NetworkPut) daemon.Response {
 	// Validate the configuration
 	err := networkValidateConfig(name, req.Config)
 	if err != nil {
@@ -693,7 +695,7 @@ func doNetworkUpdate(d *Daemon, name string, oldConfig map[string]string, req ap
 	return EmptySyncResponse
 }
 
-func networkLeasesGet(d *Daemon, r *http.Request) Response {
+func networkLeasesGet(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 	project := projectParam(r)
 
@@ -728,8 +730,8 @@ func networkLeasesGet(d *Daemon, r *http.Request) Response {
 				}
 
 				// Fill in the hwaddr from volatile
-				if inst.Type() == instance.TypeContainer {
-					d, err = inst.(*containerLXC).fillNetworkDevice(k, d)
+				if inst.Type() == instancetype.Container {
+					d, err = inst.(*instance.ContainerLXC).FillNetworkDevice(k, d)
 					if err != nil {
 						continue
 					}
@@ -919,7 +921,7 @@ func networkShutdown(s *state.State) error {
 	return nil
 }
 
-func networkStateGet(d *Daemon, r *http.Request) Response {
+func networkStateGet(d *Daemon, r *http.Request) daemon.Response {
 	// If a target was specified, forward the request to the relevant node.
 	response := ForwardedResponseIfTargetIsRemote(d, r)
 	if response != nil {
@@ -1305,7 +1307,7 @@ func (n *network) Start() error {
 		dnsmasqCmd = append(dnsmasqCmd, "--dhcp-rapid-commit")
 	}
 
-	if !debug {
+	if !daemon.Debug {
 		// --quiet options are only supported on >2.67
 		minVer, _ := version.NewDottedVersion("2.67")
 

From b76b7aa08da0a6638c531c7597aafeabbdcbba3d Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:50:11 +0100
Subject: [PATCH 49/72] lxd/migrate: Removes MigrationSinkArgs and
 MigrationSourceArgs

These have been moved to the instance package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
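A minimal sketch of the caller side after the move, assuming the field set removed below carries over unchanged to the instance package (the values shown are hypothetical):

    // Hypothetical pull-mode sink setup; NewMigrationSink takes the
    // relocated args struct (updated in the follow-up patch).
    args := instance.MigrationSinkArgs{
        Instance:     inst,  // instance.Instance to receive into
        InstanceOnly: true,  // skip snapshot transfer
        Push:         false, // pull mode
    }
    sink, err := NewMigrationSink(&args)
    if err != nil {
        return err
    }
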
 lxd/migrate.go | 36 ------------------------------------
 1 file changed, 36 deletions(-)

diff --git a/lxd/migrate.go b/lxd/migrate.go
index 6f5ead8459..8922394884 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -254,42 +254,6 @@ type migrationSink struct {
 	refresh      bool
 }
 
-type MigrationSinkArgs struct {
-	// General migration fields
-	Dialer  websocket.Dialer
-	Push    bool
-	Secrets map[string]string
-	Url     string
-
-	// Instance specific fields
-	Instance     Instance
-	InstanceOnly bool
-	Idmap        *idmap.IdmapSet
-	Live         bool
-	Refresh      bool
-	Snapshots    []*migration.Snapshot
-
-	// Storage specific fields
-	Storage    storage
-	VolumeOnly bool
-
-	// Transport specific fields
-	RsyncFeatures []string
-}
-
-type MigrationSourceArgs struct {
-	// Instance specific fields
-	Instance     Instance
-	InstanceOnly bool
-
-	// Transport specific fields
-	RsyncFeatures []string
-	ZfsFeatures   []string
-
-	// Volume specific fields
-	VolumeOnly bool
-}
-
 func (c *migrationSink) connectWithSecret(secret string) (*websocket.Conn, error) {
 	query := url.Values{"secret": []string{secret}}
 

From 0e2c6ce5444c84bd4b0a22936a5c4617407cb808 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:50:36 +0100
Subject: [PATCH 50/72] lxd/migrate*: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
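The visible effect is that the CRIU argument struct is now built with exported field names from the instance package; a condensed sketch of the feature-check call, per the hunk below:

    // Sketch only: ask CRIU whether pre-copy (dirty memory tracking)
    // is supported, then drive the migration through the container.
    criuMigrationArgs := instance.CriuMigrationArgs{
        Cmd:      lxc.MIGRATE_FEATURE_CHECK,
        Function: "feature-check",
        Features: lxc.FEATURE_MEM_TRACK,
    }
    err := c.Migrate(&criuMigrationArgs)
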
 lxd/migrate.go                 |  11 +--
 lxd/migrate_container.go       | 124 +++++++++++++++++----------------
 lxd/migrate_storage_volumes.go |  14 ++--
 3 files changed, 77 insertions(+), 72 deletions(-)

diff --git a/lxd/migrate.go b/lxd/migrate.go
index 8922394884..8d343b3ba5 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -18,9 +18,10 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/gorilla/websocket"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/idmap"
 	"github.com/lxc/lxd/shared/logger"
 )
 
@@ -38,10 +39,10 @@ type migrationFields struct {
 	// container specific fields
 	live         bool
 	instanceOnly bool
-	instance     Instance
+	instance     instance.Instance
 
 	// storage specific fields
-	storage    storage
+	storage    instance.Storage
 	volumeOnly bool
 }
 
@@ -145,7 +146,7 @@ func (s *migrationSourceWs) Metadata() interface{} {
 	return secrets
 }
 
-func (s *migrationSourceWs) Connect(op *operation, r *http.Request, w http.ResponseWriter) error {
+func (s *migrationSourceWs) Connect(op *operation.Operation, r *http.Request, w http.ResponseWriter) error {
 	secret := r.FormValue("secret")
 	if secret == "" {
 		return fmt.Errorf("missing secret")
@@ -281,7 +282,7 @@ func (s *migrationSink) Metadata() interface{} {
 	return secrets
 }
 
-func (s *migrationSink) Connect(op *operation, r *http.Request, w http.ResponseWriter) error {
+func (s *migrationSink) Connect(op *operation.Operation, r *http.Request, w http.ResponseWriter) error {
 	secret := r.FormValue("secret")
 	if secret == "" {
 		return fmt.Errorf("missing secret")
diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 4d893b87c7..e03cba4ec3 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -17,7 +17,9 @@ import (
 
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -25,7 +27,7 @@ import (
 	"github.com/lxc/lxd/shared/logger"
 )
 
-func NewMigrationSource(inst Instance, stateful bool, instanceOnly bool) (*migrationSourceWs, error) {
+func NewMigrationSource(inst instance.Instance, stateful bool, instanceOnly bool) (*migrationSourceWs, error) {
 	ret := migrationSourceWs{migrationFields{instance: inst}, make(chan bool, 1)}
 	ret.instanceOnly = instanceOnly
 
@@ -78,7 +80,7 @@ fi
 	return err
 }
 
-func snapshotToProtobuf(c Instance) *migration.Snapshot {
+func snapshotToProtobuf(c instance.Instance) *migration.Snapshot {
 	config := []*migration.Config{}
 	for k, v := range c.LocalConfig() {
 		kCopy := string(k)
@@ -124,18 +126,18 @@ func snapshotToProtobuf(c Instance) *migration.Snapshot {
 func (s *migrationSourceWs) checkForPreDumpSupport() (bool, int) {
 	// Ask CRIU if this architecture/kernel/criu combination
 	// supports pre-copy (dirty memory tracking)
-	criuMigrationArgs := CriuMigrationArgs{
-		cmd:          lxc.MIGRATE_FEATURE_CHECK,
-		stateDir:     "",
-		function:     "feature-check",
-		stop:         false,
-		actionScript: false,
-		dumpDir:      "",
-		preDumpDir:   "",
-		features:     lxc.FEATURE_MEM_TRACK,
-	}
-
-	if s.instance.Type() != instance.TypeContainer {
+	criuMigrationArgs := instance.CriuMigrationArgs{
+		Cmd:          lxc.MIGRATE_FEATURE_CHECK,
+		StateDir:     "",
+		Function:     "feature-check",
+		Stop:         false,
+		ActionScript: false,
+		DumpDir:      "",
+		PreDumpDir:   "",
+		Features:     lxc.FEATURE_MEM_TRACK,
+	}
+
+	if s.instance.Type() != instancetype.Container {
 		return false, 0
 	}
 
@@ -237,21 +239,21 @@ type preDumpLoopArgs struct {
 // of memory pages transferred by pre-dumping has been reached.
 func (s *migrationSourceWs) preDumpLoop(args *preDumpLoopArgs) (bool, error) {
 	// Do a CRIU pre-dump
-	criuMigrationArgs := CriuMigrationArgs{
-		cmd:          lxc.MIGRATE_PRE_DUMP,
-		stop:         false,
-		actionScript: false,
-		preDumpDir:   args.preDumpDir,
-		dumpDir:      args.dumpDir,
-		stateDir:     args.checkpointDir,
-		function:     "migration",
+	criuMigrationArgs := instance.CriuMigrationArgs{
+		Cmd:          lxc.MIGRATE_PRE_DUMP,
+		Stop:         false,
+		ActionScript: false,
+		PreDumpDir:   args.preDumpDir,
+		DumpDir:      args.dumpDir,
+		StateDir:     args.checkpointDir,
+		Function:     "migration",
 	}
 
 	logger.Debugf("Doing another pre-dump in %s", args.preDumpDir)
 
 	final := args.final
 
-	if s.instance.Type() != instance.TypeContainer {
+	if s.instance.Type() != instancetype.Container {
 		return false, fmt.Errorf("Instance not container type")
 	}
 
@@ -328,7 +330,7 @@ func (s *migrationSourceWs) preDumpLoop(args *preDumpLoopArgs) (bool, error) {
 	return final, nil
 }
 
-func (s *migrationSourceWs) Do(migrateOp *operation) error {
+func (s *migrationSourceWs) Do(migrateOp *operation.Operation) error {
 	<-s.allConnected
 
 	criuType := migration.CRIUType_CRIU_RSYNC.Enum()
@@ -339,7 +341,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 		}
 	}
 
-	if s.instance.Type() != instance.TypeContainer {
+	if s.instance.Type() != instancetype.Container {
 		return fmt.Errorf("Instance not container type")
 	}
 
@@ -444,7 +446,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 	zfsFeatures := header.GetZfsFeaturesSlice()
 
 	// Set source args
-	sourceArgs := MigrationSourceArgs{
+	sourceArgs := instance.MigrationSourceArgs{
 		Instance:      s.instance,
 		InstanceOnly:  s.instanceOnly,
 		RsyncFeatures: rsyncFeatures,
@@ -543,14 +545,14 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 			}
 
 			state := s.instance.DaemonState()
-			actionScriptOp, err := operationCreate(
+			actionScriptOp, err := operation.OperationCreate(
 				state.Cluster,
 				s.instance.Project(),
-				operationClassWebsocket,
+				operation.OperationClassWebsocket,
 				db.OperationContainerLiveMigrate,
 				nil,
 				nil,
-				func(op *operation) error {
+				func(op *operation.Operation) error {
 					result := <-restoreSuccess
 					if !result {
 						return fmt.Errorf("restore failed, failing CRIU")
@@ -558,7 +560,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 					return nil
 				},
 				nil,
-				func(op *operation, r *http.Request, w http.ResponseWriter) error {
+				func(op *operation.Operation, r *http.Request, w http.ResponseWriter) error {
 					secret := r.FormValue("secret")
 					if secret == "" {
 						return fmt.Errorf("missing secret")
@@ -584,7 +586,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 				return abort(err)
 			}
 
-			err = writeActionScript(checkpointDir, actionScriptOp.url, actionScriptOpSecret, state.OS.ExecPath)
+			err = writeActionScript(checkpointDir, actionScriptOp.URL, actionScriptOpSecret, state.OS.ExecPath)
 			if err != nil {
 				os.RemoveAll(checkpointDir)
 				return abort(err)
@@ -627,14 +629,14 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 			}
 
 			go func() {
-				criuMigrationArgs := CriuMigrationArgs{
-					cmd:          lxc.MIGRATE_DUMP,
-					stop:         true,
-					actionScript: true,
-					preDumpDir:   preDumpDir,
-					dumpDir:      "final",
-					stateDir:     checkpointDir,
-					function:     "migration",
+				criuMigrationArgs := instance.CriuMigrationArgs{
+					Cmd:          lxc.MIGRATE_DUMP,
+					Stop:         true,
+					ActionScript: true,
+					PreDumpDir:   preDumpDir,
+					DumpDir:      "final",
+					StateDir:     checkpointDir,
+					Function:     "migration",
 				}
 
 				// Do the final CRIU dump. This needs no special
@@ -654,14 +656,14 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 		} else {
 			logger.Debugf("The version of liblxc is older than 2.0.4 and the live migration will probably fail")
 			defer os.RemoveAll(checkpointDir)
-			criuMigrationArgs := CriuMigrationArgs{
-				cmd:          lxc.MIGRATE_DUMP,
-				stateDir:     checkpointDir,
-				function:     "migration",
-				stop:         true,
-				actionScript: false,
-				dumpDir:      "final",
-				preDumpDir:   "",
+			criuMigrationArgs := instance.CriuMigrationArgs{
+				Cmd:          lxc.MIGRATE_DUMP,
+				StateDir:     checkpointDir,
+				Function:     "migration",
+				Stop:         true,
+				ActionScript: false,
+				DumpDir:      "final",
+				PreDumpDir:   "",
 			}
 
 			err = c.Migrate(&criuMigrationArgs)
@@ -716,7 +718,7 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 	return nil
 }
 
-func NewMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) {
+func NewMigrationSink(args *instance.MigrationSinkArgs) (*migrationSink, error) {
 	sink := migrationSink{
 		src:     migrationFields{instance: args.Instance, instanceOnly: args.InstanceOnly},
 		dest:    migrationFields{instanceOnly: args.InstanceOnly},
@@ -775,8 +777,8 @@ func NewMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) {
 	return &sink, nil
 }
 
-func (c *migrationSink) Do(migrateOp *operation) error {
-	if c.src.instance.Type() != instance.TypeContainer {
+func (c *migrationSink) Do(migrateOp *operation.Operation) error {
+	if c.src.instance.Type() != instancetype.Container {
 		return fmt.Errorf("Instance not container type")
 	}
 
@@ -1009,7 +1011,7 @@ func (c *migrationSink) Do(migrateOp *operation) error {
 				sendFinalFsDelta = true
 			}
 
-			args := MigrationSinkArgs{
+			args := instance.MigrationSinkArgs{
 				Instance:      c.src.instance,
 				InstanceOnly:  c.src.instanceOnly,
 				Idmap:         srcIdmap,
@@ -1101,14 +1103,14 @@ func (c *migrationSink) Do(migrateOp *operation) error {
 		}
 
 		if live {
-			criuMigrationArgs := CriuMigrationArgs{
-				cmd:          lxc.MIGRATE_RESTORE,
-				stateDir:     imagesDir,
-				function:     "migration",
-				stop:         false,
-				actionScript: false,
-				dumpDir:      "final",
-				preDumpDir:   "",
+			criuMigrationArgs := instance.CriuMigrationArgs{
+				Cmd:          lxc.MIGRATE_RESTORE,
+				StateDir:     imagesDir,
+				Function:     "migration",
+				Stop:         false,
+				ActionScript: false,
+				DumpDir:      "final",
+				PreDumpDir:   "",
 			}
 
 			// Currently we only do a single CRIU pre-dump so we
@@ -1163,12 +1165,12 @@ func (s *migrationSourceWs) ConnectContainerTarget(target api.InstancePostTarget
 	return s.ConnectTarget(target.Certificate, target.Operation, target.Websockets)
 }
 
-func migrationCompareSnapshots(sourceSnapshots []*migration.Snapshot, targetSnapshots []Instance) ([]*migration.Snapshot, []Instance) {
+func migrationCompareSnapshots(sourceSnapshots []*migration.Snapshot, targetSnapshots []instance.Instance) ([]*migration.Snapshot, []instance.Instance) {
 	// Compare source and target
 	sourceSnapshotsTime := map[string]int64{}
 	targetSnapshotsTime := map[string]int64{}
 
-	toDelete := []Instance{}
+	toDelete := []instance.Instance{}
 	toSync := []*migration.Snapshot{}
 
 	for _, snap := range sourceSnapshots {
diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index 027fe08367..f1492f9802 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -6,13 +6,15 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/gorilla/websocket"
 
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
 )
 
-func NewStorageMigrationSource(storage storage, volumeOnly bool) (*migrationSourceWs, error) {
+func NewStorageMigrationSource(storage instance.Storage, volumeOnly bool) (*migrationSourceWs, error) {
 	ret := migrationSourceWs{migrationFields{storage: storage}, make(chan bool, 1)}
 	ret.volumeOnly = volumeOnly
 
@@ -32,7 +34,7 @@ func NewStorageMigrationSource(storage storage, volumeOnly bool) (*migrationSour
 	return &ret, nil
 }
 
-func (s *migrationSourceWs) DoStorage(migrateOp *operation) error {
+func (s *migrationSourceWs) DoStorage(migrateOp *operation.Operation) error {
 	<-s.allConnected
 
 	// Storage needs to start unconditionally now, since we need to
@@ -123,7 +125,7 @@ func (s *migrationSourceWs) DoStorage(migrateOp *operation) error {
 	zfsFeatures := header.GetZfsFeaturesSlice()
 
 	// Set source args
-	sourceArgs := MigrationSourceArgs{
+	sourceArgs := instance.MigrationSourceArgs{
 		RsyncFeatures: rsyncFeatures,
 		ZfsFeatures:   zfsFeatures,
 		VolumeOnly:    s.volumeOnly,
@@ -179,7 +181,7 @@ func (s *migrationSourceWs) DoStorage(migrateOp *operation) error {
 	return nil
 }
 
-func NewStorageMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) {
+func NewStorageMigrationSink(args *instance.MigrationSinkArgs) (*migrationSink, error) {
 	sink := migrationSink{
 		src:    migrationFields{storage: args.Storage, volumeOnly: args.VolumeOnly},
 		dest:   migrationFields{storage: args.Storage, volumeOnly: args.VolumeOnly},
@@ -223,7 +225,7 @@ func NewStorageMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) {
 	return &sink, nil
 }
 
-func (c *migrationSink) DoStorage(migrateOp *operation) error {
+func (c *migrationSink) DoStorage(migrateOp *operation.Operation) error {
 	var err error
 
 	if c.push {
@@ -333,7 +335,7 @@ func (c *migrationSink) DoStorage(migrateOp *operation) error {
 				fsConn = c.src.fsConn
 			}
 
-			args := MigrationSinkArgs{
+			args := instance.MigrationSinkArgs{
 				Storage:       c.dest.storage,
 				RsyncFeatures: rsyncFeatures,
 				Snapshots:     header.Snapshots,

From 438ac79d97447a1c0b807d1e538499fad5935f54 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:53:51 +0100
Subject: [PATCH 51/72] lxd/main/test: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/main_test.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lxd/main_test.go b/lxd/main_test.go
index a3da2eef9c..6528d278d4 100644
--- a/lxd/main_test.go
+++ b/lxd/main_test.go
@@ -11,6 +11,7 @@ import (
 	"github.com/stretchr/testify/suite"
 
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/idmap"
 )
@@ -68,7 +69,7 @@ func (suite *lxdTestSuite) SetupTest() {
 	// the next function.
 	poolConfig := map[string]string{}
 
-	mockStorage, _ := storageTypeToString(storageTypeMock)
+	mockStorage, _ := instance.StorageTypeToString(instance.StorageTypeMock)
 	// Create the database entry for the storage pool.
 	poolDescription := fmt.Sprintf("%s storage pool", lxdTestSuiteDefaultStoragePool)
 	_, err = dbStoragePoolCreateAndUpdateCache(suite.d.cluster, lxdTestSuiteDefaultStoragePool, poolDescription, mockStorage, poolConfig)

From 75680d9c3b991badaf39f87d9d0dafcabba6cae9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:54:40 +0100
Subject: [PATCH 52/72] lxd/main: Moves verbose and debug global vars to daemon
 package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
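With the flags in their own package, any subsystem can consult them without importing main; a minimal sketch (the debugf helper is illustrative, not part of the patch):

    import (
        "github.com/lxc/lxd/lxd/daemon"
        "github.com/lxc/lxd/shared/logger"
    )

    // debugf logs only when the daemon was started with --debug.
    func debugf(format string, args ...interface{}) {
        if daemon.Debug {
            logger.Debugf(format, args...)
        }
    }
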
 lxd/main.go | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/lxd/main.go b/lxd/main.go
index af52a93423..99f6506ee3 100644
--- a/lxd/main.go
+++ b/lxd/main.go
@@ -7,15 +7,12 @@ import (
 
 	"github.com/spf13/cobra"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/logging"
 	"github.com/lxc/lxd/shared/version"
 )
 
-// Global variables
-var debug bool
-var verbose bool
-
 // Initialize the random number generator
 func init() {
 	rand.Seed(time.Now().UTC().UnixNano())
@@ -36,8 +33,8 @@ type cmdGlobal struct {
 
 func (c *cmdGlobal) Run(cmd *cobra.Command, args []string) error {
 	// Set logging global variables
-	debug = c.flagLogDebug
-	verbose = c.flagLogVerbose
+	daemon.Debug = c.flagLogDebug
+	daemon.Verbose = c.flagLogVerbose
 
 	// Setup logger
 	syslog := ""

From 15556e6c13daa96a0499825ca6e6ce29f9c3c3b1 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:54:59 +0100
Subject: [PATCH 53/72] lxd/main*: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/main_activateifneeded.go | 5 +++--
 lxd/main_init.go             | 3 ++-
 lxd/main_init_auto.go        | 3 ++-
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index 63f0d7ab4a..e2602c26b9 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -11,6 +11,7 @@ import (
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/idmap"
@@ -112,7 +113,7 @@ func (c *cmdActivateifneeded) Run(cmd *cobra.Command, args []string) error {
 
 	var containers []db.Instance
 	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
-		filter := db.InstanceFilter{Type: instance.TypeContainer}
+		filter := db.InstanceFilter{Type: instancetype.Container}
 		var err error
 		containers, err = tx.InstanceList(filter)
 		return err
@@ -122,7 +123,7 @@ func (c *cmdActivateifneeded) Run(cmd *cobra.Command, args []string) error {
 	}
 
 	for _, container := range containers {
-		c, err := instanceLoadByProjectAndName(d.State(), container.Project, container.Name)
+		c, err := instance.InstanceLoadByProjectAndName(d.State(), container.Project, container.Name)
 		if err != nil {
 			sqldb.Close()
 			return err
diff --git a/lxd/main_init.go b/lxd/main_init.go
index a0a26cb157..92f8665443 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -7,6 +7,7 @@ import (
 	"github.com/spf13/cobra"
 
 	"github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 )
@@ -164,7 +165,7 @@ func (c *cmdInit) availableStorageDrivers(poolType string) []string {
 	}
 
 	// Check available backends
-	for _, driver := range supportedStoragePoolDrivers {
+	for _, driver := range instance.SupportedStoragePoolDrivers {
 		if poolType == "remote" && !shared.StringInSlice(driver, []string{"ceph", "cephfs"}) {
 			continue
 		}
diff --git a/lxd/main_init_auto.go b/lxd/main_init_auto.go
index 4022c91f30..474ee3dc3b 100644
--- a/lxd/main_init_auto.go
+++ b/lxd/main_init_auto.go
@@ -7,13 +7,14 @@ import (
 	"github.com/spf13/cobra"
 
 	"github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 )
 
 func (c *cmdInit) RunAuto(cmd *cobra.Command, args []string, d lxd.InstanceServer) (*cmdInitData, error) {
 	// Sanity checks
-	if c.flagStorageBackend != "" && !shared.StringInSlice(c.flagStorageBackend, supportedStoragePoolDrivers) {
+	if c.flagStorageBackend != "" && !shared.StringInSlice(c.flagStorageBackend, instance.SupportedStoragePoolDrivers) {
 		return nil, fmt.Errorf("The requested backend '%s' isn't supported by lxd init", c.flagStorageBackend)
 	}
 

From 2ae8393a8a32042e7fe8df2ef01a53addda77457 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:55:18 +0100
Subject: [PATCH 54/72] lxd/logging: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/logging.go | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/lxd/logging.go b/lxd/logging.go
index 162ad51a3d..2ea84d9369 100644
--- a/lxd/logging.go
+++ b/lxd/logging.go
@@ -7,7 +7,8 @@ import (
 	"time"
 
 	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/task"
 	"github.com/lxc/lxd/shared"
@@ -20,11 +21,11 @@ import (
 // and will run once every 24h.
 func expireLogsTask(state *state.State) (task.Func, task.Schedule) {
 	f := func(ctx context.Context) {
-		opRun := func(op *operation) error {
+		opRun := func(op *operation.Operation) error {
 			return expireLogs(ctx, state)
 		}
 
-		op, err := operationCreate(state.Cluster, "", operationClassTask, db.OperationLogsExpire, nil, nil, opRun, nil, nil)
+		op, err := operation.OperationCreate(state.Cluster, "", operation.OperationClassTask, db.OperationLogsExpire, nil, nil, opRun, nil, nil)
 		if err != nil {
 			logger.Error("Failed to start log expiry operation", log.Ctx{"err": err})
 			return
@@ -53,7 +54,7 @@ func expireLogs(ctx context.Context, state *state.State) error {
 	var containers []string
 	ch := make(chan struct{})
 	go func() {
-		containers, err = state.Cluster.ContainersNodeList(instance.TypeContainer)
+		containers, err = state.Cluster.ContainersNodeList(instancetype.Container)
 		ch <- struct{}{}
 	}()
 	select {

From 9ac1125e418de3a75569ef4d11df474e954b5ad6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:56:07 +0100
Subject: [PATCH 55/72] lxd/instance/instance: Adds instance generic loader
 functions

- Also defines some linked function endpoint variables, which the main package wires up at init time (see the sketch below).
- Adds WriteBackupFile function.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
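The linked function endpoints are plain package-level function variables: the instance package declares the hook so it never has to import main, and main assigns the real implementation at init time. A minimal sketch of the pattern, mirroring the EventSendLifecycle variable added below:

    // In package instance: the hook, declared but not implemented here.
    var EventSendLifecycle func(project, action, source string,
        context map[string]interface{}) error

    // In package main (wired up in a later patch of this series):
    func init() {
        instance.EventSendLifecycle = eventSendLifecycle
    }

This keeps the dependency arrow pointing from main to instance only, at the cost of a runtime panic if a hook is ever left unset.
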
 lxd/instance/instance.go | 310 +++++++++++++++++++++++++++++++++++----
 1 file changed, 280 insertions(+), 30 deletions(-)

diff --git a/lxd/instance/instance.go b/lxd/instance/instance.go
index 90e73ea918..ef0186fae5 100644
--- a/lxd/instance/instance.go
+++ b/lxd/instance/instance.go
@@ -1,52 +1,302 @@
 package instance
 
 import (
-	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
 
+	"github.com/pkg/errors"
+	yaml "gopkg.in/yaml.v2"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
+	log "github.com/lxc/lxd/shared/log15"
+	"github.com/lxc/lxd/shared/logger"
 )
 
-// Type indicates the type of instance.
-type Type int
+var StorageVolumeFillDefault func(name string, config map[string]string, parentPool *api.StoragePool) error
+var StoragePoolVolumeContainerCreateInit func(s *state.State, project string, poolName string, containerName string) (Storage, error)
+var StoragePoolVolumeContainerLoadInit func(s *state.State, project, containerName string) (Storage, error)
+var EventSendLifecycle func(project, action, source string, context map[string]interface{}) error
+var NetworkUpdateStatic func(s *state.State, networkName string) error
+var DevLXDEventSend func(c Instance, eventType string, eventMessage interface{}) error
 
-const (
-	// TypeAny represents any type of instance.
-	TypeAny = Type(-1)
+// InstanceLoadAll loads all container instances in the default project (legacy interface).
+func InstanceLoadAll(s *state.State) ([]Instance, error) {
+	return instanceLoadByProject(s, "default")
+}
 
-	// TypeContainer represents a container instance type.
-	TypeContainer = Type(0)
+func instanceLoadByProject(s *state.State, project string) ([]Instance, error) {
+	// Get all the containers
+	var cts []db.Instance
+	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		filter := db.InstanceFilter{
+			Project: project,
+			Type:    instancetype.Container,
+		}
+		var err error
+		cts, err = tx.InstanceList(filter)
+		if err != nil {
+			return err
+		}
 
-	// TypeVM represents a virtual-machine instance type.
-	TypeVM = Type(1)
-)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return instanceLoadAllInternal(cts, s)
+}
+
+func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Instance, error) {
+	// Figure out what profiles are in use
+	profiles := map[string]map[string]api.Profile{}
+	for _, instArgs := range dbInstances {
+		projectProfiles, ok := profiles[instArgs.Project]
+		if !ok {
+			projectProfiles = map[string]api.Profile{}
+			profiles[instArgs.Project] = projectProfiles
+		}
+		for _, profile := range instArgs.Profiles {
+			_, ok := projectProfiles[profile]
+			if !ok {
+				projectProfiles[profile] = api.Profile{}
+			}
+		}
+	}
+
+	// Get the profile data
+	for project, projectProfiles := range profiles {
+		for name := range projectProfiles {
+			_, profile, err := s.Cluster.ProfileGet(project, name)
+			if err != nil {
+				return nil, err
+			}
+
+			projectProfiles[name] = *profile
+		}
+	}
+
+	// Load the instance structs
+	instances := []Instance{}
+	for _, dbInstance := range dbInstances {
+		// Figure out the instance's profiles
+		cProfiles := []api.Profile{}
+		for _, name := range dbInstance.Profiles {
+			cProfiles = append(cProfiles, profiles[dbInstance.Project][name])
+		}
+
+		if dbInstance.Type == instancetype.Container {
+			args := db.ContainerToArgs(&dbInstance)
+			ct, err := ContainerLXCLoad(s, args, cProfiles)
+			if err != nil {
+				return nil, err
+			}
+			instances = append(instances, ct)
+		} else {
+			// TODO add virtual machine load here.
+			continue
+		}
+
+	}
+
+	return instances, nil
+}
+
+func InstanceLoadByProjectAndName(s *state.State, project, name string) (Instance, error) {
+	// Get the DB record
+	var container *db.Instance
+	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+
+		if strings.Contains(name, shared.SnapshotDelimiter) {
+			parts := strings.SplitN(name, shared.SnapshotDelimiter, 2)
+			instanceName := parts[0]
+			snapshotName := parts[1]
+
+			instance, err := tx.InstanceGet(project, instanceName)
+			if err != nil {
+				return errors.Wrapf(err, "Failed to fetch instance %q in project %q", name, project)
+			}
+
+			snapshot, err := tx.InstanceSnapshotGet(project, instanceName, snapshotName)
+			if err != nil {
+				return errors.Wrapf(err, "Failed to fetch snapshot %q of instance %q in project %q", snapshotName, instanceName, project)
+			}
+
+			c := db.InstanceSnapshotToInstance(instance, snapshot)
+			container = &c
+		} else {
+			container, err = tx.InstanceGet(project, name)
+			if err != nil {
+				return errors.Wrapf(err, "Failed to fetch container %q in project %q", name, project)
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	args := db.ContainerToArgs(container)
+
+	c, err := ContainerLXCLoad(s, args, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to load container")
+	}
+
+	return c, nil
+}
+
+func InstanceLoadById(s *state.State, id int) (Instance, error) {
+	// Get the DB record
+	project, name, err := s.Cluster.ContainerProjectAndName(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return InstanceLoadByProjectAndName(s, project, name)
+}
 
-// New validates the supplied string against the allowed types of instance and returns the internal
-// representation of that type. If empty string is supplied then the type returned is TypeContainer.
-// If an invalid name is supplied an error will be returned.
-func New(name string) (Type, error) {
-	// If "container" or "" is supplied, return type as TypeContainer.
-	if api.InstanceType(name) == api.InstanceTypeContainer || name == "" {
-		return TypeContainer, nil
+func InstanceDeleteSnapshots(s *state.State, project, name string) error {
+	results, err := s.Cluster.ContainerGetSnapshots(project, name)
+	if err != nil {
+		return err
 	}
 
-	// If "virtual-machine" is supplied, return type as TypeVM.
-	if api.InstanceType(name) == api.InstanceTypeVM {
-		return TypeVM, nil
+	for _, sname := range results {
+		sc, err := InstanceLoadByProjectAndName(s, project, sname)
+		if err != nil {
+			logger.Error(
+				"InstanceDeleteSnapshots: Failed to load the snapshot container",
+				log.Ctx{"instance": name, "snapshot": sname, "err": err})
+
+			continue
+		}
+
+		if err := sc.Delete(); err != nil {
+			logger.Error(
+				"InstanceDeleteSnapshots: Failed to delete a snapshot container",
+				log.Ctx{"instance": name, "snapshot": sname, "err": err})
+		}
 	}
 
-	return -1, fmt.Errorf("Invalid instance type")
+	return nil
+}
+
+// InstanceLoadNodeAll loads all instances on this node.
+func InstanceLoadNodeAll(s *state.State) ([]Instance, error) {
+	// Get all the container arguments
+	var cts []db.Instance
+	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		cts, err = tx.ContainerNodeList()
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return instanceLoadAllInternal(cts, s)
+}
+
+type BackupFile struct {
+	Container *api.Instance           `yaml:"container"`
+	Snapshots []*api.InstanceSnapshot `yaml:"snapshots"`
+	Pool      *api.StoragePool        `yaml:"pool"`
+	Volume    *api.StorageVolume      `yaml:"volume"`
 }
 
-// String converts the internal representation of instance type to a string used in API requests.
-// Returns empty string if value is not a valid instance type.
-func (instanceType Type) String() string {
-	if instanceType == TypeContainer {
-		return string(api.InstanceTypeContainer)
+func WriteBackupFile(c Instance) error {
+	// We only write backup files out for actual containers
+	if c.IsSnapshot() {
+		return nil
+	}
+
+	// Immediately return if the container directory doesn't exist yet
+	if !shared.PathExists(c.Path()) {
+		return os.ErrNotExist
+	}
+
+	// Generate the YAML
+	ci, _, err := c.Render()
+	if err != nil {
+		return errors.Wrap(err, "Failed to render container metadata")
+	}
+
+	snapshots, err := c.Snapshots()
+	if err != nil {
+		return errors.Wrap(err, "Failed to get snapshots")
+	}
+
+	var sis []*api.InstanceSnapshot
+
+	for _, s := range snapshots {
+		si, _, err := s.Render()
+		if err != nil {
+			return err
+		}
+
+		sis = append(sis, si.(*api.InstanceSnapshot))
+	}
+
+	poolName, err := c.StoragePool()
+	if err != nil {
+		return err
+	}
+
+	s := c.DaemonState()
+	poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
+	if err != nil {
+		return err
+	}
+
+	_, volume, err := s.Cluster.StoragePoolNodeVolumeGetTypeByProject(c.Project(), c.Name(), db.StoragePoolVolumeTypeContainer, poolID)
+	if err != nil {
+		return err
+	}
+
+	data, err := yaml.Marshal(&BackupFile{
+		Container: ci.(*api.Instance),
+		Snapshots: sis,
+		Pool:      pool,
+		Volume:    volume,
+	})
+	if err != nil {
+		return err
+	}
+
+	// Ensure the container is currently mounted
+	if !shared.PathExists(c.RootfsPath()) {
+		logger.Debug("Unable to update backup.yaml at this time", log.Ctx{"name": c.Name(), "project": c.Project()})
+		return nil
+	}
+
+	// Write the YAML
+	f, err := os.Create(filepath.Join(c.Path(), "backup.yaml"))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	err = f.Chmod(0400)
+	if err != nil {
+		return err
 	}
 
-	if instanceType == TypeVM {
-		return string(api.InstanceTypeVM)
+	err = shared.WriteAll(f, data)
+	if err != nil {
+		return err
 	}
 
-	return ""
+	return nil
 }

From 2a2db143d723d6272145103bd0a3157f2b07c9d1 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:57:02 +0100
Subject: [PATCH 56/72] lxd/images: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
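The recurring substitution in this file is the operation package's constructor; the call shape, condensed from the autoUpdateImagesTask hunk below:

    // Sketch: run a background task under a tracked operation.
    run := func(op *operation.Operation) error {
        return autoUpdateImages(ctx, d)
    }

    op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask,
        db.OperationImagesUpdate, nil, nil, run, nil, nil)
    if err != nil {
        return err
    }
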
 lxd/images.go | 97 ++++++++++++++++++++++++++-------------------------
 1 file changed, 50 insertions(+), 47 deletions(-)

diff --git a/lxd/images.go b/lxd/images.go
index 330c6afbd3..01898b8677 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -27,9 +27,12 @@ import (
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/task"
 	"github.com/lxc/lxd/lxd/util"
@@ -103,10 +106,10 @@ var imageAliasCmd = APIEndpoint{
    end for whichever finishes last. */
 var imagePublishLock sync.Mutex
 
-func unpackImage(imagefname string, destpath string, sType storageType, runningInUserns bool, tracker *ioprogress.ProgressTracker) error {
+func unpackImage(imagefname string, destpath string, sType instance.StorageType, runningInUserns bool, tracker *ioprogress.ProgressTracker) error {
 	blockBackend := false
 
-	if sType == storageTypeLvm || sType == storageTypeCeph {
+	if sType == instance.StorageTypeLvm || sType == instance.StorageTypeCeph {
 		blockBackend = true
 	}
 
@@ -154,7 +157,7 @@ func compressFile(compress string, infile io.Reader, outfile io.Writer) error {
  * This function takes a container or snapshot from the local image server and
  * exports it as an image.
  */
-func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, op *operation, builddir string) (*api.Image, error) {
+func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, op *operation.Operation, builddir string) (*api.Image, error) {
 	info := api.Image{}
 	info.Type = "container"
 	info.Properties = map[string]string{}
@@ -186,7 +189,7 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, op *operati
 		info.Public = false
 	}
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return nil, err
 	}
@@ -315,7 +318,7 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, op *operati
 	return &info, nil
 }
 
-func imgPostRemoteInfo(d *Daemon, req api.ImagesPost, op *operation, project string) (*api.Image, error) {
+func imgPostRemoteInfo(d *Daemon, req api.ImagesPost, op *operation.Operation, project string) (*api.Image, error) {
 	var err error
 	var hash string
 
@@ -353,7 +356,7 @@ func imgPostRemoteInfo(d *Daemon, req api.ImagesPost, op *operation, project str
 	return info, nil
 }
 
-func imgPostURLInfo(d *Daemon, req api.ImagesPost, op *operation, project string) (*api.Image, error) {
+func imgPostURLInfo(d *Daemon, req api.ImagesPost, op *operation.Operation, project string) (*api.Image, error) {
 	var err error
 
 	if req.Source.URL == "" {
@@ -475,9 +478,9 @@ func getImgPostInfo(d *Daemon, r *http.Request, builddir string, project string,
 		}
 
 		if part.FormName() == "rootfs" {
-			info.Type = instance.TypeContainer.String()
+			info.Type = instancetype.Container.String()
 		} else if part.FormName() == "rootfs.img" {
-			info.Type = instance.TypeVM.String()
+			info.Type = instancetype.VM.String()
 		} else {
 			logger.Error("Invalid multipart image")
 			return nil, fmt.Errorf("Invalid multipart image")
@@ -640,7 +643,7 @@ func imageCreateInPool(d *Daemon, info *api.Image, storagePool string) error {
 	return nil
 }
 
-func imagesPost(d *Daemon, r *http.Request) Response {
+func imagesPost(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -719,7 +722,7 @@ func imagesPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Begin background operation
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		var err error
 		var info *api.Image
 
@@ -785,7 +788,7 @@ func imagesPost(d *Daemon, r *http.Request) Response {
 		return nil
 	}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationImageDownload, nil, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationImageDownload, nil, nil, run, nil, nil)
 	if err != nil {
 		cleanup(builddir, post)
 		return InternalError(err)
@@ -865,12 +868,12 @@ func getImageMetadata(fname string) (*api.ImageMetadata, string, error) {
 
 		if strings.HasPrefix(hdr.Name, "rootfs/") || strings.HasPrefix(hdr.Name, "./rootfs/") {
 			hasRoot = true
-			imageType = instance.TypeContainer.String()
+			imageType = instancetype.Container.String()
 		}
 
 		if hdr.Name == "rootfs.img" || hdr.Name == "./rootfs.img" {
 			hasRoot = true
-			imageType = instance.TypeVM.String()
+			imageType = instancetype.VM.String()
 		}
 
 		if hasMeta && hasRoot {
@@ -926,7 +929,7 @@ func doImagesGet(d *Daemon, recursion bool, project string, public bool) (interf
 	return resultMap, nil
 }
 
-func imagesGet(d *Daemon, r *http.Request) Response {
+func imagesGet(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	public := d.checkTrustedClient(r) != nil || AllowProjectPermission("images", "view")(d, r) != EmptySyncResponse
 
@@ -939,11 +942,11 @@ func imagesGet(d *Daemon, r *http.Request) Response {
 
 func autoUpdateImagesTask(d *Daemon) (task.Func, task.Schedule) {
 	f := func(ctx context.Context) {
-		opRun := func(op *operation) error {
+		opRun := func(op *operation.Operation) error {
 			return autoUpdateImages(ctx, d)
 		}
 
-		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationImagesUpdate, nil, nil, opRun, nil, nil)
+		op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationImagesUpdate, nil, nil, opRun, nil, nil)
 		if err != nil {
 			logger.Error("Failed to start image update operation", log.Ctx{"err": err})
 			return
@@ -1040,7 +1043,7 @@ func autoUpdateImagesInProject(ctx context.Context, d *Daemon, project string) e
 
 // Update a single image.  The operation can be nil, if no progress tracking is needed.
 // Returns whether the image has been updated.
-func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image, project string) error {
+func autoUpdateImage(d *Daemon, op *operation.Operation, id int, info *api.Image, project string) error {
 	fingerprint := info.Fingerprint
 	_, source, err := d.cluster.ImageSourceGet(id)
 	if err != nil {
@@ -1167,11 +1170,11 @@ func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image, project
 
 func pruneExpiredImagesTask(d *Daemon) (task.Func, task.Schedule) {
 	f := func(ctx context.Context) {
-		opRun := func(op *operation) error {
+		opRun := func(op *operation.Operation) error {
 			return pruneExpiredImages(ctx, d)
 		}
 
-		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationImagesExpire, nil, nil, opRun, nil, nil)
+		op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationImagesExpire, nil, nil, opRun, nil, nil)
 		if err != nil {
 			logger.Error("Failed to start expired image operation", log.Ctx{"err": err})
 			return
@@ -1220,7 +1223,7 @@ func pruneExpiredImagesTask(d *Daemon) (task.Func, task.Schedule) {
 }
 
 func pruneLeftoverImages(d *Daemon) {
-	opRun := func(op *operation) error {
+	opRun := func(op *operation.Operation) error {
 		// Get all images
 		images, err := d.cluster.ImagesGet("default", false)
 		if err != nil {
@@ -1249,7 +1252,7 @@ func pruneLeftoverImages(d *Daemon) {
 		return nil
 	}
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationImagesPruneLeftover, nil, nil, opRun, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationImagesPruneLeftover, nil, nil, opRun, nil, nil)
 	if err != nil {
 		logger.Error("Failed to start image leftover cleanup operation", log.Ctx{"err": err})
 		return
@@ -1355,7 +1358,7 @@ func doDeleteImageFromPool(state *state.State, fingerprint string, storagePool s
 	return nil
 }
 
-func imageDelete(d *Daemon, r *http.Request) Response {
+func imageDelete(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	fingerprint := mux.Vars(r)["fingerprint"]
 
@@ -1451,7 +1454,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 		return nil
 	}
 
-	rmimg := func(op *operation) error {
+	rmimg := func(op *operation.Operation) error {
 		if isClusterNotification(r) {
 			return deleteFromDisk()
 		}
@@ -1462,7 +1465,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["images"] = []string{fingerprint}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationImageDelete, resources, nil, rmimg, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationImageDelete, resources, nil, rmimg, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -1491,7 +1494,7 @@ func imageDeleteFromDisk(fingerprint string) {
 	}
 }
 
-func doImageGet(db *db.Cluster, project, fingerprint string, public bool) (*api.Image, Response) {
+func doImageGet(db *db.Cluster, project, fingerprint string, public bool) (*api.Image, daemon.Response) {
 	_, imgInfo, err := db.ImageGet(project, fingerprint, public, false)
 	if err != nil {
 		return nil, SmartError(err)
@@ -1501,12 +1504,12 @@ func doImageGet(db *db.Cluster, project, fingerprint string, public bool) (*api.
 }
 
 func imageValidSecret(fingerprint string, secret string) bool {
-	for _, op := range operations {
-		if op.resources == nil {
+	for _, op := range operation.Operations {
+		if op.Resources == nil {
 			continue
 		}
 
-		opImages, ok := op.resources["images"]
+		opImages, ok := op.Resources["images"]
 		if !ok {
 			continue
 		}
@@ -1515,7 +1518,7 @@ func imageValidSecret(fingerprint string, secret string) bool {
 			continue
 		}
 
-		opSecret, ok := op.metadata["secret"]
+		opSecret, ok := op.Metadata["secret"]
 		if !ok {
 			continue
 		}
@@ -1530,7 +1533,7 @@ func imageValidSecret(fingerprint string, secret string) bool {
 	return false
 }
 
-func imageGet(d *Daemon, r *http.Request) Response {
+func imageGet(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	fingerprint := mux.Vars(r)["fingerprint"]
 	public := d.checkTrustedClient(r) != nil || AllowProjectPermission("images", "view")(d, r) != EmptySyncResponse
@@ -1549,7 +1552,7 @@ func imageGet(d *Daemon, r *http.Request) Response {
 	return SyncResponseETag(true, info, etag)
 }
 
-func imagePut(d *Daemon, r *http.Request) Response {
+func imagePut(d *Daemon, r *http.Request) daemon.Response {
 	// Get current value
 	project := projectParam(r)
 	fingerprint := mux.Vars(r)["fingerprint"]
@@ -1578,7 +1581,7 @@ func imagePut(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func imagePatch(d *Daemon, r *http.Request) Response {
+func imagePatch(d *Daemon, r *http.Request) daemon.Response {
 	// Get current value
 	project := projectParam(r)
 	fingerprint := mux.Vars(r)["fingerprint"]
@@ -1645,7 +1648,7 @@ func imagePatch(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func imageAliasesPost(d *Daemon, r *http.Request) Response {
+func imageAliasesPost(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	req := api.ImageAliasesPost{}
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@@ -1679,7 +1682,7 @@ func imageAliasesPost(d *Daemon, r *http.Request) Response {
 	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/images/aliases/%s", version.APIVersion, req.Name))
 }
 
-func imageAliasesGet(d *Daemon, r *http.Request) Response {
+func imageAliasesGet(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	recursion := util.IsRecursionRequest(r)
 
@@ -1710,7 +1713,7 @@ func imageAliasesGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, responseMap)
 }
 
-func imageAliasGet(d *Daemon, r *http.Request) Response {
+func imageAliasGet(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	name := mux.Vars(r)["name"]
 	public := d.checkTrustedClient(r) != nil || AllowProjectPermission("images", "view")(d, r) != EmptySyncResponse
@@ -1723,7 +1726,7 @@ func imageAliasGet(d *Daemon, r *http.Request) Response {
 	return SyncResponseETag(true, alias, alias)
 }
 
-func imageAliasDelete(d *Daemon, r *http.Request) Response {
+func imageAliasDelete(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	name := mux.Vars(r)["name"]
 	_, _, err := d.cluster.ImageAliasGet(project, name, true)
@@ -1739,7 +1742,7 @@ func imageAliasDelete(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func imageAliasPut(d *Daemon, r *http.Request) Response {
+func imageAliasPut(d *Daemon, r *http.Request) daemon.Response {
 	// Get current value
 	project := projectParam(r)
 	name := mux.Vars(r)["name"]
@@ -1776,7 +1779,7 @@ func imageAliasPut(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func imageAliasPatch(d *Daemon, r *http.Request) Response {
+func imageAliasPatch(d *Daemon, r *http.Request) daemon.Response {
 	// Get current value
 	project := projectParam(r)
 	name := mux.Vars(r)["name"]
@@ -1829,7 +1832,7 @@ func imageAliasPatch(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func imageAliasPost(d *Daemon, r *http.Request) Response {
+func imageAliasPost(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	name := mux.Vars(r)["name"]
 
@@ -1857,7 +1860,7 @@ func imageAliasPost(d *Daemon, r *http.Request) Response {
 	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/images/aliases/%s", version.APIVersion, req.Name))
 }
 
-func imageExport(d *Daemon, r *http.Request) Response {
+func imageExport(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	fingerprint := mux.Vars(r)["fingerprint"]
 
@@ -1941,7 +1944,7 @@ func imageExport(d *Daemon, r *http.Request) Response {
 	return FileResponse(r, files, nil, false)
 }
 
-func imageSecret(d *Daemon, r *http.Request) Response {
+func imageSecret(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	fingerprint := mux.Vars(r)["fingerprint"]
 	_, imgInfo, err := d.cluster.ImageGet(project, fingerprint, false, false)
@@ -1961,7 +1964,7 @@ func imageSecret(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["images"] = []string{imgInfo.Fingerprint}
 
-	op, err := operationCreate(d.cluster, project, operationClassToken, db.OperationImageToken, resources, meta, nil, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassToken, db.OperationImageToken, resources, meta, nil, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -2025,7 +2028,7 @@ func imageImportFromNode(imagesDir string, client lxd.InstanceServer, fingerprin
 	return nil
 }
 
-func imageRefresh(d *Daemon, r *http.Request) Response {
+func imageRefresh(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	fingerprint := mux.Vars(r)["fingerprint"]
 	imageId, imageInfo, err := d.cluster.ImageGet(project, fingerprint, false, false)
@@ -2034,11 +2037,11 @@ func imageRefresh(d *Daemon, r *http.Request) Response {
 	}
 
 	// Begin background operation
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		return autoUpdateImage(d, op, imageId, imageInfo, project)
 	}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationImageRefresh, nil, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationImageRefresh, nil, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -2067,11 +2070,11 @@ func autoSyncImagesTask(d *Daemon) (task.Func, task.Schedule) {
 			return
 		}
 
-		opRun := func(op *operation) error {
+		opRun := func(op *operation.Operation) error {
 			return autoSyncImages(ctx, d)
 		}
 
-		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationImagesSynchronize, nil, nil, opRun, nil, nil)
+		op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationImagesSynchronize, nil, nil, opRun, nil, nil)
 		if err != nil {
 			logger.Error("Failed to start image synchronization operation", log.Ctx{"err": err})
 			return

From f6d1e1157c75b4819affc0d4b87c1e28fe32a1b9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:57:35 +0100
Subject: [PATCH 57/72] lxd/events: Adds linked functions

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/events.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lxd/events.go b/lxd/events.go
index 8040d20a4a..2f6bd8056e 100644
--- a/lxd/events.go
+++ b/lxd/events.go
@@ -18,6 +18,12 @@ import (
 	"github.com/lxc/lxd/shared/logger"
 )
 
+func init() {
+	instance.EventSendLifecycle = eventSendLifecycle
+	operation.EventSend = eventSend
+
+}
+
 var eventsCmd = APIEndpoint{
 	Path: "events",
 

From 3a89d68c3b71ea9a8b332568a09395889ca2befe Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:57:50 +0100
Subject: [PATCH 58/72] lxd/events: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/events.go | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/lxd/events.go b/lxd/events.go
index 2f6bd8056e..6be6ee23d3 100644
--- a/lxd/events.go
+++ b/lxd/events.go
@@ -12,7 +12,10 @@ import (
 	log "github.com/lxc/lxd/shared/log15"
 	"github.com/pborman/uuid"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
@@ -148,7 +151,7 @@ func eventsSocket(d *Daemon, r *http.Request, w http.ResponseWriter) error {
 	return nil
 }
 
-func eventsGet(d *Daemon, r *http.Request) Response {
+func eventsGet(d *Daemon, r *http.Request) daemon.Response {
 	return &eventsServe{req: r, d: d}
 }
 
@@ -244,11 +247,11 @@ func eventForward(id int64, event api.Event) {
 			return
 		}
 
-		if !debug && logEntry.Level == "dbug" {
+		if !daemon.Debug && logEntry.Level == "dbug" {
 			return
 		}
 
-		if !debug && !verbose && logEntry.Level == "info" {
+		if !daemon.Debug && !daemon.Verbose && logEntry.Level == "info" {
 			return
 		}
 	}

From 1ed2aa66d311d426692a8c24c1d57bb8bf19f646 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:58:35 +0100
Subject: [PATCH 59/72] lxd/devlxd: Links devlxdEventSend to instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/devlxd.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lxd/devlxd.go b/lxd/devlxd.go
index 7ceeb96685..cb7ec5671c 100644
--- a/lxd/devlxd.go
+++ b/lxd/devlxd.go
@@ -26,6 +26,10 @@ import (
 	"github.com/lxc/lxd/shared/version"
 )
 
+func init() {
+	instance.DevLXDEventSend = devlxdEventSend
+}
+
 // DevLxdServer creates an http.Server capable of handling requests against the
 // /dev/lxd Unix socket endpoint created inside containers.
 func DevLxdServer(d *Daemon) *http.Server {

From 04060750b3326dadb726c25c259a5f3f2815eea7 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:58:49 +0100
Subject: [PATCH 60/72] lxd/devlxd: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/devlxd.go | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/lxd/devlxd.go b/lxd/devlxd.go
index cb7ec5671c..c735166efc 100644
--- a/lxd/devlxd.go
+++ b/lxd/devlxd.go
@@ -19,7 +19,9 @@ import (
 	"github.com/gorilla/websocket"
 	"github.com/pborman/uuid"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
@@ -145,7 +147,7 @@ var devlxdEventsGet = devLxdHandler{"/1.0/events", func(d *Daemon, c container,
 	return &devLxdResponse{"websocket", http.StatusOK, "websocket"}
 }}
 
-func devlxdEventSend(c container, eventType string, eventMessage interface{}) error {
+func devlxdEventSend(c instance.Instance, eventType string, eventMessage interface{}) error {
 	event := shared.Jmap{}
 	event["type"] = eventType
 	event["timestamp"] = time.Now()
@@ -252,7 +254,7 @@ func hoistReq(f func(*Daemon, container, http.ResponseWriter, *http.Request) *de
 			http.Error(w, fmt.Sprintf("%s", resp.content), resp.code)
 		} else if resp.ctype == "json" {
 			w.Header().Set("Content-Type", "application/json")
-			util.WriteJSON(w, resp.content, debug)
+			util.WriteJSON(w, resp.content, daemon.Debug)
 		} else if resp.ctype != "websocket" {
 			w.Header().Set("Content-Type", "application/octet-stream")
 			fmt.Fprintf(w, resp.content.(string))
@@ -449,12 +451,12 @@ func findContainerForPid(pid int32, d *Daemon) (container, error) {
 				project = strings.Split(name, "_")[0]
 			}
 
-			inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+			inst, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 			if err != nil {
 				return nil, err
 			}
 
-			if inst.Type() != instance.TypeContainer {
+			if inst.Type() != instancetype.Container {
 				return nil, fmt.Errorf("Instance is not container type")
 			}
 
@@ -487,13 +489,13 @@ func findContainerForPid(pid int32, d *Daemon) (container, error) {
 		return nil, err
 	}
 
-	instances, err := instanceLoadNodeAll(d.State())
+	instances, err := instance.InstanceLoadNodeAll(d.State())
 	if err != nil {
 		return nil, err
 	}
 
 	for _, inst := range instances {
-		if inst.Type() != instance.TypeContainer {
+		if inst.Type() != instancetype.Container {
 			continue
 		}
 

From cf3df582e2a4a3caaa349be549b9e3fdf445ede2 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 11:59:40 +0100
Subject: [PATCH 61/72] lxd/db*: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
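For context, the instancetype package is assumed to keep the same numeric values as the old instance.Type constants, so these DB filters change spelling but not semantics; a sketch of the mapping plus a typical filter:

    // Values per the constants removed in the earlier instance patch.
    const (
        Any       = Type(-1) // was instance.TypeAny
        Container = Type(0)  // was instance.TypeContainer
        VM        = Type(1)  // was instance.TypeVM
    )

    filter := db.InstanceFilter{Type: instancetype.Container}
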
 lxd/db/containers.go       | 32 ++++++++++++++++----------------
 lxd/db/containers_test.go  | 20 ++++++++++----------
 lxd/db/images.go           | 14 +++++++-------
 lxd/db/instances.mapper.go |  4 ++--
 lxd/db/snapshots_test.go   |  6 +++---
 5 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 9bda7f6e3f..45aa647c3a 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -10,7 +10,7 @@ import (
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/lxd/device/config"
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
@@ -70,7 +70,7 @@ type Instance struct {
 	Project      string `db:"primary=yes&join=projects.name"`
 	Name         string `db:"primary=yes"`
 	Node         string `db:"join=nodes.name"`
-	Type         instance.Type
+	Type         instancetype.Type
 	Snapshot     bool
 	Architecture int
 	Ephemeral    bool
@@ -89,7 +89,7 @@ type InstanceFilter struct {
 	Project string
 	Name    string
 	Node    string
-	Type    instance.Type
+	Type    instancetype.Type
 }
 
 // ContainerToArgs is a convenience to convert the new Container db struct into
@@ -127,7 +127,7 @@ type ContainerArgs struct {
 	// Don't set manually
 	ID       int
 	Node     string
-	Type     instance.Type
+	Type     instancetype.Type
 	Snapshot bool
 
 	// Creation only
@@ -168,14 +168,14 @@ SELECT instances.name FROM instances
   JOIN projects ON projects.id = instances.project_id
   WHERE projects.name = ? AND instances.type = ?
 `
-	return query.SelectStrings(c.tx, stmt, project, instance.TypeContainer)
+	return query.SelectStrings(c.tx, stmt, project, instancetype.Container)
 }
 
 // ContainerNodeAddress returns the address of the node hosting the container
 // with the given name in the given project.
 //
 // It returns the empty string if the container is hosted on this node.
-func (c *ClusterTx) ContainerNodeAddress(project string, name string, instanceType instance.Type) (string, error) {
+func (c *ClusterTx) ContainerNodeAddress(project string, name string, instanceType instancetype.Type) (string, error) {
 	var stmt string
 
 	args := make([]interface{}, 0, 4) // Expect up to 4 filters.
@@ -186,7 +186,7 @@ func (c *ClusterTx) ContainerNodeAddress(project string, name string, instanceTy
 	args = append(args, project)
 
 	// Instance type filter.
-	if instanceType != instance.TypeAny {
+	if instanceType != instancetype.Any {
 		filters.WriteString(" AND instances.type = ?")
 		args = append(args, instanceType)
 	}
@@ -264,7 +264,7 @@ SELECT nodes.id, nodes.address
 // string, to distinguish it from remote nodes.
 //
 // Containers whose node is down are added to the special address "0.0.0.0".
-func (c *ClusterTx) ContainersListByNodeAddress(project string, instanceType instance.Type) (map[string][]string, error) {
+func (c *ClusterTx) ContainersListByNodeAddress(project string, instanceType instancetype.Type) (map[string][]string, error) {
 	offlineThreshold, err := c.NodeOfflineThreshold()
 	if err != nil {
 		return nil, err
@@ -278,7 +278,7 @@ func (c *ClusterTx) ContainersListByNodeAddress(project string, instanceType ins
 	args = append(args, project)
 
 	// Instance type filter.
-	if instanceType != instance.TypeAny {
+	if instanceType != instancetype.Any {
 		filters.WriteString(" AND instances.type = ?")
 		args = append(args, instanceType)
 	}
@@ -364,7 +364,7 @@ func (c *ClusterTx) ContainerListExpanded() ([]Instance, error) {
 
 // ContainersByNodeName returns a map associating each container to the name of
 // its node.
-func (c *ClusterTx) ContainersByNodeName(project string, instanceType instance.Type) (map[string]string, error) {
+func (c *ClusterTx) ContainersByNodeName(project string, instanceType instancetype.Type) (map[string]string, error) {
 	args := make([]interface{}, 0, 2) // Expect up to 2 filters.
 	var filters strings.Builder
 
@@ -373,7 +373,7 @@ func (c *ClusterTx) ContainersByNodeName(project string, instanceType instance.T
 	args = append(args, project)
 
 	// Instance type filter.
-	if instanceType != instance.TypeAny {
+	if instanceType != instancetype.Any {
 		filters.WriteString(" AND instances.type = ?")
 		args = append(args, instanceType)
 	}
@@ -532,14 +532,14 @@ func (c *ClusterTx) ContainerNodeList() ([]Instance, error) {
 	}
 	filter := InstanceFilter{
 		Node: node,
-		Type: instance.TypeContainer,
+		Type: instancetype.Container,
 	}
 
 	return c.InstanceList(filter)
 }
 
 // ContainerNodeProjectList returns all container objects on the local node within the given project.
-func (c *ClusterTx) ContainerNodeProjectList(project string, instanceType instance.Type) ([]Instance, error) {
+func (c *ClusterTx) ContainerNodeProjectList(project string, instanceType instancetype.Type) ([]Instance, error) {
 	node, err := c.NodeName()
 	if err != nil {
 		return nil, errors.Wrap(err, "Local node name")
@@ -839,7 +839,7 @@ func (c *Cluster) ContainerConfig(id int) (map[string]string, error) {
 // use it for new code.
 func (c *Cluster) LegacyContainersList() ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM instances WHERE type=? ORDER BY name")
-	inargs := []interface{}{instance.TypeContainer}
+	inargs := []interface{}{instancetype.Container}
 	var container string
 	outfmt := []interface{}{container}
 	result, err := queryScan(c.db, q, inargs, outfmt)
@@ -866,7 +866,7 @@ FROM instances_snapshots
 JOIN instances ON instances.id = instances_snapshots.instance_id
 WHERE type=? ORDER BY instances.name, instances_snapshots.name
 `)
-	inargs := []interface{}{instance.TypeContainer}
+	inargs := []interface{}{instancetype.Container}
 	var container string
 	var snapshot string
 	outfmt := []interface{}{container, snapshot}
@@ -885,7 +885,7 @@ WHERE type=? ORDER BY instances.name, instances_snapshots.name
 
 // ContainersNodeList returns the names of all the containers of the given type
 // running on the local node.
-func (c *Cluster) ContainersNodeList(instanceType instance.Type) ([]string, error) {
+func (c *Cluster) ContainersNodeList(instanceType instancetype.Type) ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM instances WHERE type=? AND node_id=? ORDER BY name")
 	inargs := []interface{}{instanceType, c.nodeID}
 	var container string
diff --git a/lxd/db/containers_test.go b/lxd/db/containers_test.go
index ef829b9e37..be914912bb 100644
--- a/lxd/db/containers_test.go
+++ b/lxd/db/containers_test.go
@@ -8,7 +8,7 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared/api"
 )
 
@@ -32,7 +32,7 @@ func TestContainerList(t *testing.T) {
 	addContainerDevice(t, tx, "c2", "eth0", "nic", nil)
 	addContainerDevice(t, tx, "c3", "root", "disk", map[string]string{"x": "y"})
 
-	filter := db.InstanceFilter{Type: instance.TypeContainer}
+	filter := db.InstanceFilter{Type: instancetype.Container}
 	containers, err := tx.InstanceList(filter)
 	require.NoError(t, err)
 	assert.Len(t, containers, 3)
@@ -74,7 +74,7 @@ func TestContainerList_FilterByNode(t *testing.T) {
 	filter := db.InstanceFilter{
 		Project: "default",
 		Node:    "node2",
-		Type:    instance.TypeContainer,
+		Type:    instancetype.Container,
 	}
 
 	containers, err := tx.InstanceList(filter)
@@ -119,7 +119,7 @@ func TestInstanceList_ContainerWithSameNameInDifferentProjects(t *testing.T) {
 		Project:      "blah",
 		Name:         "c1",
 		Node:         "none",
-		Type:         instance.TypeContainer,
+		Type:         instancetype.Container,
 		Architecture: 1,
 		Ephemeral:    false,
 		Stateful:     true,
@@ -134,7 +134,7 @@ func TestInstanceList_ContainerWithSameNameInDifferentProjects(t *testing.T) {
 		Project:      "test",
 		Name:         "c1",
 		Node:         "none",
-		Type:         instance.TypeContainer,
+		Type:         instancetype.Container,
 		Architecture: 1,
 		Ephemeral:    false,
 		Stateful:     true,
@@ -173,7 +173,7 @@ func TestInstanceListExpanded(t *testing.T) {
 		Project:      "default",
 		Name:         "c1",
 		Node:         "none",
-		Type:         instance.TypeContainer,
+		Type:         instancetype.Container,
 		Architecture: 1,
 		Ephemeral:    false,
 		Stateful:     true,
@@ -313,7 +313,7 @@ func TestContainersListByNodeAddress(t *testing.T) {
 	addContainer(t, tx, nodeID3, "c3")
 	addContainer(t, tx, nodeID2, "c4")
 
-	result, err := tx.ContainersListByNodeAddress("default", instance.TypeContainer)
+	result, err := tx.ContainersListByNodeAddress("default", instancetype.Container)
 	require.NoError(t, err)
 	assert.Equal(
 		t,
@@ -337,7 +337,7 @@ func TestContainersByNodeName(t *testing.T) {
 	addContainer(t, tx, nodeID2, "c1")
 	addContainer(t, tx, nodeID1, "c2")
 
-	result, err := tx.ContainersByNodeName("default", instance.TypeContainer)
+	result, err := tx.ContainersByNodeName("default", instancetype.Container)
 	require.NoError(t, err)
 	assert.Equal(
 		t,
@@ -398,7 +398,7 @@ func TestContainersNodeList(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	names, err := cluster.ContainersNodeList(instance.TypeContainer)
+	names, err := cluster.ContainersNodeList(instancetype.Container)
 	require.NoError(t, err)
 	assert.Equal(t, names, []string{"c1"})
 }
@@ -450,7 +450,7 @@ func addContainer(t *testing.T, tx *db.ClusterTx, nodeID int64, name string) {
 	stmt := `
 INSERT INTO instances(node_id, name, architecture, type, project_id) VALUES (?, ?, 1, ?, 1)
 `
-	_, err := tx.Tx().Exec(stmt, nodeID, name, instance.TypeContainer)
+	_, err := tx.Tx().Exec(stmt, nodeID, name, instancetype.Container)
 	require.NoError(t, err)
 }
 
diff --git a/lxd/db/images.go b/lxd/db/images.go
index 294ec0a348..c41c7a0852 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -8,7 +8,7 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/lxc/lxd/lxd/db/query"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/osarch"
 )
@@ -157,10 +157,10 @@ func (c *Cluster) ImageSourceGet(imageID int) (int, api.ImageSource, error) {
 // cached image that matches the given remote details (server, protocol and
 // alias). Return the fingerprint linked to the matching entry, if any.
 func (c *Cluster) ImageSourceGetCachedFingerprint(server string, protocol string, alias string, typeName string) (string, error) {
-	imageType := instance.TypeAny
+	imageType := instancetype.Any
 	if typeName != "" {
 		var err error
-		imageType, err = instance.New(typeName)
+		imageType, err = instancetype.New(typeName)
 		if err != nil {
 			return "", err
 		}
@@ -431,7 +431,7 @@ func (c *Cluster) imageFill(id int, image *api.Image, create, expire, used, uplo
 	}
 
 	image.Architecture, _ = osarch.ArchitectureName(arch)
-	image.Type = instance.Type(imageType).String()
+	image.Type = instancetype.Type(imageType).String()
 
 	// The upload date is enforced by NOT NULL in the schema, so it can never be nil.
 	image.UploadedAt = *upload
@@ -647,7 +647,7 @@ func (c *Cluster) ImageAliasGet(project, name string, isTrustedClient bool) (int
 	entry.Name = name
 	entry.Target = fingerprint
 	entry.Description = description
-	entry.Type = instance.Type(imageType).String()
+	entry.Type = instancetype.Type(imageType).String()
 
 	return id, entry, nil
 }
@@ -807,10 +807,10 @@ func (c *Cluster) ImageInsert(project, fp string, fname string, sz int64, public
 		arch = 0
 	}
 
-	imageType := instance.TypeAny
+	imageType := instancetype.Any
 	if typeName != "" {
 		var err error
-		imageType, err = instance.New(typeName)
+		imageType, err = instancetype.New(typeName)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/db/instances.mapper.go b/lxd/db/instances.mapper.go
index 74389ef9e7..2db0957c66 100644
--- a/lxd/db/instances.mapper.go
+++ b/lxd/db/instances.mapper.go
@@ -10,7 +10,7 @@ import (
 
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/db/query"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared/api"
 )
 
@@ -166,7 +166,7 @@ func (c *ClusterTx) InstanceList(filter InstanceFilter) ([]Instance, error) {
 	if filter.Node != "" {
 		criteria["Node"] = filter.Node
 	}
-	if filter.Type != instance.TypeAny {
+	if filter.Type != instancetype.Any {
 		criteria["Type"] = filter.Type
 	}
 
diff --git a/lxd/db/snapshots_test.go b/lxd/db/snapshots_test.go
index 0ef5121170..804ccf27f6 100644
--- a/lxd/db/snapshots_test.go
+++ b/lxd/db/snapshots_test.go
@@ -8,7 +8,7 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared/api"
 )
 
@@ -97,7 +97,7 @@ func TestInstanceSnapshotList_SameNameInDifferentProjects(t *testing.T) {
 		Project:      "default",
 		Name:         "i1",
 		Node:         "none",
-		Type:         instance.TypeContainer,
+		Type:         instancetype.Container,
 		Architecture: 1,
 		Ephemeral:    false,
 		Stateful:     true,
@@ -110,7 +110,7 @@ func TestInstanceSnapshotList_SameNameInDifferentProjects(t *testing.T) {
 		Project:      "p1",
 		Name:         "i1",
 		Node:         "none",
-		Type:         instance.TypeContainer,
+		Type:         instancetype.Container,
 		Architecture: 1,
 		Ephemeral:    false,
 		Stateful:     true,

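The caller-facing effect of this patch is that db filters and struct fields are typed with instancetype.Type. A quick sketch of listing containers inside a cluster transaction, with names taken from the diff (the open transaction tx is assumed):

    filter := db.InstanceFilter{
        Project: "default",
        Type:    instancetype.Container, // previously instance.TypeContainer
    }

    containers, err := tx.InstanceList(filter)
    if err != nil {
        return err
    }

    for _, c := range containers {
        // Instance rows expose Project, Name and Node, among other fields.
        fmt.Printf("%s/%s on %s\n", c.Project, c.Name, c.Node)
    }
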
From 808332f05a3817a7a64e6adc3bdd69f043d3070b Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:00:28 +0100
Subject: [PATCH 62/72] lxd/daemon: Removes setupSharedMounts as moved to
 daemon package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/daemon.go | 36 ------------------------------------
 1 file changed, 36 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index d9a0f1d3a7..8e241b8374 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -471,42 +471,6 @@ func (d *Daemon) createCmd(restAPI *mux.Router, version string, c APIEndpoint) {
 	}
 }
 
-// have we setup shared mounts?
-var sharedMounted bool
-var sharedMountsLock sync.Mutex
-
-func setupSharedMounts() error {
-	// Check if we already went through this
-	if sharedMounted {
-		return nil
-	}
-
-	// Get a lock to prevent races
-	sharedMountsLock.Lock()
-	defer sharedMountsLock.Unlock()
-
-	// Check if already setup
-	path := shared.VarPath("shmounts")
-	if shared.IsMountPoint(path) {
-		sharedMounted = true
-		return nil
-	}
-
-	// Mount a new tmpfs
-	if err := unix.Mount("tmpfs", path, "tmpfs", 0, "size=100k,mode=0711"); err != nil {
-		return err
-	}
-
-	// Mark as MS_SHARED and MS_REC
-	var flags uintptr = unix.MS_SHARED | unix.MS_REC
-	if err := unix.Mount(path, path, "none", flags, ""); err != nil {
-		return err
-	}
-
-	sharedMounted = true
-	return nil
-}
-
 func (d *Daemon) Init() error {
 	err := d.init()
 

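The removed helper survives unchanged as daemon.SetupSharedMounts (added in PATCH 01); only its package and visibility changed. Callers in the main package switch to the exported name, roughly as below; note the real call site in PATCH 63 deliberately ignores the error, since mounting the shmounts tmpfs is best-effort:

    // Sketch only: attempt to mount the shmounts tmpfs at startup.
    if err := daemon.SetupSharedMounts(); err != nil {
        logger.Warnf("Failed to set up shared mounts: %v", err)
    }
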
From 10d41e05e4006bd0b3e033afd622c2892d3581b9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:00:55 +0100
Subject: [PATCH 63/72] lxd/daemon*: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/daemon.go        | 25 +++++++++++++------------
 lxd/daemon_images.go |  9 +++++----
 2 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 8e241b8374..d681ddd362 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -13,7 +13,6 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/CanonicalLtd/candidclient"
@@ -29,9 +28,11 @@ import (
 	"gopkg.in/macaroon-bakery.v2/httpbakery"
 
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/device"
 	"github.com/lxc/lxd/lxd/endpoints"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/maas"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/rbac"
@@ -173,19 +174,19 @@ type APIEndpointAlias struct {
 
 // APIEndpointAction represents an action on an API endpoint.
 type APIEndpointAction struct {
-	Handler        func(d *Daemon, r *http.Request) Response
-	AccessHandler  func(d *Daemon, r *http.Request) Response
+	Handler        func(d *Daemon, r *http.Request) daemon.Response
+	AccessHandler  func(d *Daemon, r *http.Request) daemon.Response
 	AllowUntrusted bool
 }
 
 // AllowAuthenticated is an AccessHandler which allows all requests
-func AllowAuthenticated(d *Daemon, r *http.Request) Response {
+func AllowAuthenticated(d *Daemon, r *http.Request) daemon.Response {
 	return EmptySyncResponse
 }
 
 // AllowProjectPermission is a wrapper to check access against the project, its features and RBAC permission
-func AllowProjectPermission(feature string, permission string) func(d *Daemon, r *http.Request) Response {
-	return func(d *Daemon, r *http.Request) Response {
+func AllowProjectPermission(feature string, permission string) func(d *Daemon, r *http.Request) daemon.Response {
+	return func(d *Daemon, r *http.Request) daemon.Response {
 		// Shortcut for speed
 		if d.userIsAdmin(r) {
 			return EmptySyncResponse
@@ -402,7 +403,7 @@ func (d *Daemon) createCmd(restAPI *mux.Router, version string, c APIEndpoint) {
 		}
 
 		// Dump full request JSON when in debug mode
-		if debug && r.Method != "GET" && isJSONRequest(r) {
+		if daemon.Debug && r.Method != "GET" && isJSONRequest(r) {
 			newBody := &bytes.Buffer{}
 			captured := &bytes.Buffer{}
 			multiW := io.MultiWriter(newBody, captured)
@@ -416,10 +417,10 @@ func (d *Daemon) createCmd(restAPI *mux.Router, version string, c APIEndpoint) {
 		}
 
 		// Actually process the request
-		var resp Response
+		var resp daemon.Response
 		resp = NotImplemented(nil)
 
-		handleRequest := func(action APIEndpointAction) Response {
+		handleRequest := func(action APIEndpointAction) daemon.Response {
 			if action.Handler == nil {
 				return NotImplemented(nil)
 			}
@@ -604,7 +605,7 @@ func (d *Daemon) init() error {
 	/* Setup some mounts (nice to have) */
 	if !d.os.MockMode {
 		// Attempt to mount the shmounts tmpfs
-		setupSharedMounts()
+		daemon.SetupSharedMounts()
 
 		// Attempt to Mount the devlxd tmpfs
 		devlxd := filepath.Join(d.os.VarDir, "devlxd")
@@ -835,7 +836,7 @@ func (d *Daemon) init() error {
 
 		// Register devices on running instances to receive events.
 		// This should come after the event handler go routines have been started.
-		devicesRegister(d.State())
+		instance.DevicesRegister(d.State())
 
 		// Setup seccomp handler
 		if d.os.SeccompListener {
@@ -956,7 +957,7 @@ func (d *Daemon) Ready() error {
 }
 
 func (d *Daemon) numRunningContainers() (int, error) {
-	results, err := instanceLoadNodeAll(d.State())
+	results, err := instance.InstanceLoadNodeAll(d.State())
 	if err != nil {
 		return 0, err
 	}
diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index d5a0f1e9a0..3641c3a30f 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -17,6 +17,7 @@ import (
 	"github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/sys"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -79,7 +80,7 @@ func imageGetStreamCache(d *Daemon) (map[string]*imageStreamCacheEntry, error) {
 }
 
 // ImageDownload resolves the image fingerprint and if not in the database, downloads it
-func (d *Daemon) ImageDownload(op *operation, server string, protocol string, certificate string, secret string, alias string, imageType string, forContainer bool, autoUpdate bool, storagePool string, preferCached bool, project string) (*api.Image, error) {
+func (d *Daemon) ImageDownload(op *operation.Operation, server string, protocol string, certificate string, secret string, alias string, imageType string, forContainer bool, autoUpdate bool, storagePool string, preferCached bool, project string) (*api.Image, error) {
 	var err error
 	var ctxMap log.Ctx
 
@@ -356,7 +357,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 	if op == nil {
 		ctxMap = log.Ctx{"alias": alias, "server": server}
 	} else {
-		ctxMap = log.Ctx{"trigger": op.url, "image": fp, "operation": op.id, "alias": alias, "server": server}
+		ctxMap = log.Ctx{"trigger": op.URL, "image": fp, "operation": op.ID, "alias": alias, "server": server}
 	}
 	logger.Info("Downloading image", ctxMap)
 
@@ -379,7 +380,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 			return
 		}
 
-		meta := op.metadata
+		meta := op.Metadata
 		if meta == nil {
 			meta = make(map[string]interface{})
 		}
@@ -393,7 +394,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 	var canceler *cancel.Canceler
 	if op != nil {
 		canceler = cancel.NewCanceler()
-		op.canceler = canceler
+		op.Canceler = canceler
 	}
 
 	if protocol == "lxd" || protocol == "simplestreams" {

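Two surface changes worth noting for anyone carrying out-of-tree handlers: endpoint handlers now return daemon.Response rather than the main package's Response, and the operation fields read across package boundaries are exported (op.URL, op.ID, op.Metadata, op.Canceler). A sketch of a handler under the new signature (serverInfoGet and its payload are hypothetical):

    func serverInfoGet(d *Daemon, r *http.Request) daemon.Response {
        payload := map[string]interface{}{"ready": true} // hypothetical payload
        return SyncResponse(true, payload)
    }
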
From 5024031178924757393cc06cda159e3ba23a4cf7 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:01:46 +0100
Subject: [PATCH 64/72] lxd/api*: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/api_1.0.go      |  9 +++++----
 lxd/api_cluster.go  | 34 ++++++++++++++++++----------------
 lxd/api_internal.go | 44 +++++++++++++++++++++++---------------------
 lxd/api_project.go  | 22 ++++++++++++----------
 4 files changed, 58 insertions(+), 51 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 7339ca26b7..91027b60f5 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -11,6 +11,7 @@ import (
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/config"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
@@ -83,7 +84,7 @@ var api10 = []APIEndpoint{
 	storagePoolVolumeTypeImageCmd,
 }
 
-func api10Get(d *Daemon, r *http.Request) Response {
+func api10Get(d *Daemon, r *http.Request) daemon.Response {
 	authMethods := []string{"tls"}
 	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
 		config, err := cluster.ConfigLoad(tx)
@@ -244,7 +245,7 @@ func api10Get(d *Daemon, r *http.Request) Response {
 	return SyncResponseETag(true, fullSrv, fullSrv.Config)
 }
 
-func api10Put(d *Daemon, r *http.Request) Response {
+func api10Put(d *Daemon, r *http.Request) daemon.Response {
 	// If a target was specified, forward the request to the relevant node.
 	response := ForwardedResponseIfTargetIsRemote(d, r)
 	if response != nil {
@@ -292,7 +293,7 @@ func api10Put(d *Daemon, r *http.Request) Response {
 	return doApi10Update(d, req, false)
 }
 
-func api10Patch(d *Daemon, r *http.Request) Response {
+func api10Patch(d *Daemon, r *http.Request) daemon.Response {
 	// If a target was specified, forward the request to the relevant node.
 	response := ForwardedResponseIfTargetIsRemote(d, r)
 	if response != nil {
@@ -320,7 +321,7 @@ func api10Patch(d *Daemon, r *http.Request) Response {
 	return doApi10Update(d, req, true)
 }
 
-func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
+func doApi10Update(d *Daemon, req api.ServerPut, patch bool) daemon.Response {
 	s := d.State()
 
 	// First deal with config specific to the local daemon
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 6980defc7b..fef0f0f5f4 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -18,8 +18,10 @@ import (
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/operation"
 	storagedriver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -68,7 +70,7 @@ var internalClusterPromoteCmd = APIEndpoint{
 }
 
 // Return information about the cluster.
-func clusterGet(d *Daemon, r *http.Request) Response {
+func clusterGet(d *Daemon, r *http.Request) daemon.Response {
 	name := ""
 	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
 		var err error
@@ -169,7 +171,7 @@ func clusterGetMemberConfig(cluster *db.Cluster) ([]api.ClusterMemberConfigKey,
 // - disable clustering on a node
 //
 // The client is required to be trusted.
-func clusterPut(d *Daemon, r *http.Request) Response {
+func clusterPut(d *Daemon, r *http.Request) daemon.Response {
 	req := api.ClusterPut{}
 
 	// Parse the request
@@ -201,8 +203,8 @@ func clusterPut(d *Daemon, r *http.Request) Response {
 	return clusterPutJoin(d, req)
 }
 
-func clusterPutBootstrap(d *Daemon, req api.ClusterPut) Response {
-	run := func(op *operation) error {
+func clusterPutBootstrap(d *Daemon, req api.ClusterPut) daemon.Response {
+	run := func(op *operation.Operation) error {
 		// The default timeout when non-clustered is one minute, let's
 		// lower it down now that we'll likely have to make requests
 		// over the network.
@@ -250,7 +252,7 @@ func clusterPutBootstrap(d *Daemon, req api.ClusterPut) Response {
 		return nil
 	})
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationClusterBootstrap, resources, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationClusterBootstrap, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -261,7 +263,7 @@ func clusterPutBootstrap(d *Daemon, req api.ClusterPut) Response {
 	return OperationResponse(op)
 }
 
-func clusterPutJoin(d *Daemon, req api.ClusterPut) Response {
+func clusterPutJoin(d *Daemon, req api.ClusterPut) daemon.Response {
 	// Make sure basic pre-conditions are met.
 	if len(req.ClusterCertificate) == 0 {
 		return BadRequest(fmt.Errorf("No target cluster node certificate provided"))
@@ -354,7 +356,7 @@ func clusterPutJoin(d *Daemon, req api.ClusterPut) Response {
 	fingerprint := cert.Fingerprint()
 
 	// Asynchronously join the cluster.
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		logger.Debug("Running cluster join operation")
 
 		// If the user has provided a cluster password, set up the trust
@@ -608,7 +610,7 @@ func clusterPutJoin(d *Daemon, req api.ClusterPut) Response {
 	resources := map[string][]string{}
 	resources["cluster"] = []string{}
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationClusterJoin, resources, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationClusterJoin, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -617,7 +619,7 @@ func clusterPutJoin(d *Daemon, req api.ClusterPut) Response {
 }
 
 // Disable clustering on a node.
-func clusterPutDisable(d *Daemon) Response {
+func clusterPutDisable(d *Daemon) daemon.Response {
 	// Close the cluster database
 	err := d.cluster.Close()
 	if err != nil {
@@ -814,7 +816,7 @@ func clusterAcceptMember(
 	return info, nil
 }
 
-func clusterNodesGet(d *Daemon, r *http.Request) Response {
+func clusterNodesGet(d *Daemon, r *http.Request) daemon.Response {
 	recursion := util.IsRecursionRequest(r)
 
 	nodes, err := cluster.List(d.State())
@@ -837,7 +839,7 @@ func clusterNodesGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, result)
 }
 
-func clusterNodeGet(d *Daemon, r *http.Request) Response {
+func clusterNodeGet(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 
 	nodes, err := cluster.List(d.State())
@@ -854,7 +856,7 @@ func clusterNodeGet(d *Daemon, r *http.Request) Response {
 	return NotFound(fmt.Errorf("Node '%s' not found", name))
 }
 
-func clusterNodePost(d *Daemon, r *http.Request) Response {
+func clusterNodePost(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 
 	req := api.ClusterMemberPost{}
@@ -875,7 +877,7 @@ func clusterNodePost(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func clusterNodeDelete(d *Daemon, r *http.Request) Response {
+func clusterNodeDelete(d *Daemon, r *http.Request) daemon.Response {
 	force, err := strconv.Atoi(r.FormValue("force"))
 	if err != nil {
 		force = 0
@@ -973,7 +975,7 @@ func tryClusterRebalance(d *Daemon) error {
 	return nil
 }
 
-func internalClusterPostAccept(d *Daemon, r *http.Request) Response {
+func internalClusterPostAccept(d *Daemon, r *http.Request) daemon.Response {
 	req := internalClusterPostAcceptRequest{}
 
 	// Parse the request
@@ -1057,7 +1059,7 @@ type internalRaftNode struct {
 
 // Used to update the cluster after a database node has been removed, and
 // possibly promote another one as database node.
-func internalClusterPostRebalance(d *Daemon, r *http.Request) Response {
+func internalClusterPostRebalance(d *Daemon, r *http.Request) daemon.Response {
 	// Redirect all requests to the leader, which is the one with
 	// up-to-date knowledge of what nodes are part of the raft cluster.
 	localAddress, err := node.ClusterAddress(d.db)
@@ -1145,7 +1147,7 @@ func internalClusterPostRebalance(d *Daemon, r *http.Request) Response {
 }
 
 // Used to promote the local non-database node to be a database one.
-func internalClusterPostPromote(d *Daemon, r *http.Request) Response {
+func internalClusterPostPromote(d *Daemon, r *http.Request) daemon.Response {
 	req := internalClusterPostPromoteRequest{}
 
 	// Parse the request
diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index e6534ed8ef..8863f537aa 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -17,12 +17,14 @@ import (
 	"github.com/pkg/errors"
 	"gopkg.in/yaml.v2"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/db/node"
 	"github.com/lxc/lxd/lxd/db/query"
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/project"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
@@ -103,7 +105,7 @@ var internalRAFTSnapshotCmd = APIEndpoint{
 	Get: APIEndpointAction{Handler: internalRAFTSnapshot},
 }
 
-func internalWaitReady(d *Daemon, r *http.Request) Response {
+func internalWaitReady(d *Daemon, r *http.Request) daemon.Response {
 	select {
 	case <-d.readyChan:
 	default:
@@ -113,24 +115,24 @@ func internalWaitReady(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func internalShutdown(d *Daemon, r *http.Request) Response {
+func internalShutdown(d *Daemon, r *http.Request) daemon.Response {
 	d.shutdownChan <- struct{}{}
 
 	return EmptySyncResponse
 }
 
-func internalContainerOnStart(d *Daemon, r *http.Request) Response {
+func internalContainerOnStart(d *Daemon, r *http.Request) daemon.Response {
 	id, err := strconv.Atoi(mux.Vars(r)["id"])
 	if err != nil {
 		return SmartError(err)
 	}
 
-	inst, err := instanceLoadById(d.State(), id)
+	inst, err := instance.InstanceLoadById(d.State(), id)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	if inst.Type() != instance.TypeContainer {
+	if inst.Type() != instancetype.Container {
 		return SmartError(fmt.Errorf("Instance is not container type"))
 	}
 
@@ -144,7 +146,7 @@ func internalContainerOnStart(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func internalContainerOnStopNS(d *Daemon, r *http.Request) Response {
+func internalContainerOnStopNS(d *Daemon, r *http.Request) daemon.Response {
 	id, err := strconv.Atoi(mux.Vars(r)["id"])
 	if err != nil {
 		return SmartError(err)
@@ -156,12 +158,12 @@ func internalContainerOnStopNS(d *Daemon, r *http.Request) Response {
 	}
 	netns := queryParam(r, "netns")
 
-	inst, err := instanceLoadById(d.State(), id)
+	inst, err := instance.InstanceLoadById(d.State(), id)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	if inst.Type() != instance.TypeContainer {
+	if inst.Type() != instancetype.Container {
 		return SmartError(fmt.Errorf("Instance is not container type"))
 	}
 
@@ -175,7 +177,7 @@ func internalContainerOnStopNS(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func internalContainerOnStop(d *Daemon, r *http.Request) Response {
+func internalContainerOnStop(d *Daemon, r *http.Request) daemon.Response {
 	id, err := strconv.Atoi(mux.Vars(r)["id"])
 	if err != nil {
 		return SmartError(err)
@@ -186,12 +188,12 @@ func internalContainerOnStop(d *Daemon, r *http.Request) Response {
 		target = "unknown"
 	}
 
-	inst, err := instanceLoadById(d.State(), id)
+	inst, err := instance.InstanceLoadById(d.State(), id)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	if inst.Type() != instance.TypeContainer {
+	if inst.Type() != instancetype.Container {
 		return SmartError(fmt.Errorf("Instance is not container type"))
 	}
 
@@ -226,7 +228,7 @@ type internalSQLResult struct {
 }
 
 // Perform a database dump.
-func internalSQLGet(d *Daemon, r *http.Request) Response {
+func internalSQLGet(d *Daemon, r *http.Request) daemon.Response {
 	database := r.FormValue("database")
 
 	if !shared.StringInSlice(database, []string{"local", "global"}) {
@@ -262,7 +264,7 @@ func internalSQLGet(d *Daemon, r *http.Request) Response {
 }
 
 // Execute queries.
-func internalSQLPost(d *Daemon, r *http.Request) Response {
+func internalSQLPost(d *Daemon, r *http.Request) daemon.Response {
 	req := &internalSQLQuery{}
 	// Parse the request.
 	err := json.NewDecoder(r.Body).Decode(&req)
@@ -389,13 +391,13 @@ func internalSQLExec(tx *sql.Tx, query string, result *internalSQLResult) error
 	return nil
 }
 
-func slurpBackupFile(path string) (*backupFile, error) {
+func slurpBackupFile(path string) (*instance.BackupFile, error) {
 	data, err := ioutil.ReadFile(path)
 	if err != nil {
 		return nil, err
 	}
 
-	backup := backupFile{}
+	backup := instance.BackupFile{}
 
 	if err := yaml.Unmarshal(data, &backup); err != nil {
 		return nil, err
@@ -409,7 +411,7 @@ type internalImportPost struct {
 	Force bool   `json:"force" yaml:"force"`
 }
 
-func internalImport(d *Daemon, r *http.Request) Response {
+func internalImport(d *Daemon, r *http.Request) daemon.Response {
 	projectName := projectParam(r)
 
 	req := &internalImportPost{}
@@ -734,7 +736,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		switch backup.Pool.Driver {
 		case "btrfs":
 			snpMntPt := driver.GetSnapshotMountPoint(projectName, backup.Pool.Name, snap.Name)
-			if !shared.PathExists(snpMntPt) || !isBtrfsSubVolume(snpMntPt) {
+			if !shared.PathExists(snpMntPt) || !driver.IsBtrfsSubVolume(snpMntPt) {
 				if req.Force {
 					continue
 				}
@@ -921,7 +923,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		BaseImage:    baseImage,
 		Config:       backup.Container.Config,
 		CreationDate: backup.Container.CreatedAt,
-		Type:         instance.TypeContainer,
+		Type:         instancetype.Container,
 		Description:  backup.Container.Description,
 		Devices:      deviceConfig.NewDevices(backup.Container.Devices),
 		Ephemeral:    backup.Container.Ephemeral,
@@ -1027,7 +1029,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 			BaseImage:    baseImage,
 			Config:       snap.Config,
 			CreationDate: snap.CreatedAt,
-			Type:         instance.TypeContainer,
+			Type:         instancetype.Container,
 			Snapshot:     true,
 			Devices:      deviceConfig.NewDevices(snap.Devices),
 			Ephemeral:    snap.Ephemeral,
@@ -1056,7 +1058,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func internalGC(d *Daemon, r *http.Request) Response {
+func internalGC(d *Daemon, r *http.Request) daemon.Response {
 	logger.Infof("Started forced garbage collection run")
 	runtime.GC()
 	runtimeDebug.FreeOSMemory()
@@ -1065,7 +1067,7 @@ func internalGC(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-func internalRAFTSnapshot(d *Daemon, r *http.Request) Response {
+func internalRAFTSnapshot(d *Daemon, r *http.Request) daemon.Response {
 	logger.Infof("Started forced RAFT snapshot")
 	err := d.gateway.Snapshot()
 	if err != nil {
diff --git a/lxd/api_project.go b/lxd/api_project.go
index 4802c1d15c..0bf8680238 100644
--- a/lxd/api_project.go
+++ b/lxd/api_project.go
@@ -11,7 +11,9 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/pkg/errors"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -35,7 +37,7 @@ var projectCmd = APIEndpoint{
 	Put:    APIEndpointAction{Handler: projectPut, AccessHandler: AllowAuthenticated},
 }
 
-func projectsGet(d *Daemon, r *http.Request) Response {
+func projectsGet(d *Daemon, r *http.Request) daemon.Response {
 	recursion := util.IsRecursionRequest(r)
 
 	var result interface{}
@@ -86,7 +88,7 @@ func projectsGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, result)
 }
 
-func projectsPost(d *Daemon, r *http.Request) Response {
+func projectsPost(d *Daemon, r *http.Request) daemon.Response {
 	// Parse the request
 	project := api.ProjectsPost{}
 
@@ -174,7 +176,7 @@ func projectCreateDefaultProfile(tx *db.ClusterTx, project string) error {
 	return nil
 }
 
-func projectGet(d *Daemon, r *http.Request) Response {
+func projectGet(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 
 	// Check user permissions
@@ -202,7 +204,7 @@ func projectGet(d *Daemon, r *http.Request) Response {
 	return SyncResponseETag(true, project, etag)
 }
 
-func projectPut(d *Daemon, r *http.Request) Response {
+func projectPut(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 
 	// Check user permissions
@@ -243,7 +245,7 @@ func projectPut(d *Daemon, r *http.Request) Response {
 	return projectChange(d, project, req)
 }
 
-func projectPatch(d *Daemon, r *http.Request) Response {
+func projectPatch(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 
 	// Check user permissions
@@ -311,7 +313,7 @@ func projectPatch(d *Daemon, r *http.Request) Response {
 }
 
 // Common logic between PUT and PATCH.
-func projectChange(d *Daemon, project *api.Project, req api.ProjectPut) Response {
+func projectChange(d *Daemon, project *api.Project, req api.ProjectPut) daemon.Response {
 	// Flag indicating if any feature has changed.
 	featuresChanged := req.Config["features.images"] != project.Config["features.images"] || req.Config["features.profiles"] != project.Config["features.profiles"]
 
@@ -363,7 +365,7 @@ func projectChange(d *Daemon, project *api.Project, req api.ProjectPut) Response
 	return EmptySyncResponse
 }
 
-func projectPost(d *Daemon, r *http.Request) Response {
+func projectPost(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 
 	// Parse the request
@@ -380,7 +382,7 @@ func projectPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Perform the rename
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		var id int64
 		err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
 			project, err := tx.ProjectGet(req.Name)
@@ -422,7 +424,7 @@ func projectPost(d *Daemon, r *http.Request) Response {
 		return nil
 	}
 
-	op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationProjectRename, nil, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationProjectRename, nil, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -430,7 +432,7 @@ func projectPost(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func projectDelete(d *Daemon, r *http.Request) Response {
+func projectDelete(d *Daemon, r *http.Request) daemon.Response {
 	name := mux.Vars(r)["name"]
 
 	// Sanity checks

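Asynchronous endpoints now construct their background tasks through the operation package. The pattern, lifted from the hunks above (the db.Operation* type constant varies per endpoint):

    run := func(op *operation.Operation) error {
        // Long-running work goes here; op exposes URL, ID, Metadata, etc.
        return nil
    }

    op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask,
        db.OperationProjectRename, nil, nil, run, nil, nil)
    if err != nil {
        return InternalError(err)
    }

    return OperationResponse(op)
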
From 970aa712c758f7a24c118cd67cceec5a4983ced3 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:07:12 +0100
Subject: [PATCH 65/72] lxd/backup: Removes functions and structs that have
 been moved to instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/backup.go | 135 --------------------------------------------------
 1 file changed, 135 deletions(-)

diff --git a/lxd/backup.go b/lxd/backup.go
index 16f6b7c9f6..bc8b409fe8 100644
--- a/lxd/backup.go
+++ b/lxd/backup.go
@@ -24,33 +24,6 @@ import (
 	"github.com/pkg/errors"
 )
 
-// Load a backup from the database
-func backupLoadByName(s *state.State, project, name string) (*backup, error) {
-	// Get the backup database record
-	args, err := s.Cluster.ContainerGetBackup(project, name)
-	if err != nil {
-		return nil, errors.Wrap(err, "Load backup from database")
-	}
-
-	// Load the instance it belongs to
-	instance, err := instanceLoadById(s, args.ContainerID)
-	if err != nil {
-		return nil, errors.Wrap(err, "Load container from database")
-	}
-
-	// Return the backup struct
-	return &backup{
-		state:            s,
-		instance:         instance,
-		id:               args.ID,
-		name:             name,
-		creationDate:     args.CreationDate,
-		expiryDate:       args.ExpiryDate,
-		instanceOnly:     args.InstanceOnly,
-		optimizedStorage: args.OptimizedStorage,
-	}, nil
-}
-
 // Create a new backup
 func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer Instance) error {
 	// Create the database entry
@@ -79,84 +52,6 @@ func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer I
 	return nil
 }
 
-// backup represents a container backup
-type backup struct {
-	state    *state.State
-	instance Instance
-
-	// Properties
-	id               int
-	name             string
-	creationDate     time.Time
-	expiryDate       time.Time
-	instanceOnly     bool
-	optimizedStorage bool
-}
-
-type backupInfo struct {
-	Project         string   `json:"project" yaml:"project"`
-	Name            string   `json:"name" yaml:"name"`
-	Backend         string   `json:"backend" yaml:"backend"`
-	Privileged      bool     `json:"privileged" yaml:"privileged"`
-	Pool            string   `json:"pool" yaml:"pool"`
-	Snapshots       []string `json:"snapshots,omitempty" yaml:"snapshots,omitempty"`
-	HasBinaryFormat bool     `json:"-" yaml:"-"`
-}
-
-// Rename renames a container backup
-func (b *backup) Rename(newName string) error {
-	oldBackupPath := shared.VarPath("backups", b.name)
-	newBackupPath := shared.VarPath("backups", newName)
-
-	// Create the new backup path
-	backupsPath := shared.VarPath("backups", b.instance.Name())
-	if !shared.PathExists(backupsPath) {
-		err := os.MkdirAll(backupsPath, 0700)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Rename the backup directory
-	err := os.Rename(oldBackupPath, newBackupPath)
-	if err != nil {
-		return err
-	}
-
-	// Check if we can remove the container directory
-	empty, _ := shared.PathIsEmpty(backupsPath)
-	if empty {
-		err := os.Remove(backupsPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Rename the database record
-	err = b.state.Cluster.ContainerBackupRename(b.name, newName)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Delete removes an instance backup
-func (b *backup) Delete() error {
-	return doBackupDelete(b.state, b.name, b.instance.Name())
-}
-
-func (b *backup) Render() *api.InstanceBackup {
-	return &api.InstanceBackup{
-		Name:             strings.SplitN(b.name, "/", 2)[1],
-		CreatedAt:        b.creationDate,
-		ExpiresAt:        b.expiryDate,
-		InstanceOnly:     b.instanceOnly,
-		ContainerOnly:    b.instanceOnly,
-		OptimizedStorage: b.optimizedStorage,
-	}
-}
-
 func backupGetInfo(r io.ReadSeeker) (*backupInfo, error) {
 	var tr *tar.Reader
 	result := backupInfo{}
@@ -497,33 +392,3 @@ func pruneExpiredContainerBackups(ctx context.Context, d *Daemon) error {
 
 	return nil
 }
-
-func doBackupDelete(s *state.State, backupName, containerName string) error {
-	backupPath := shared.VarPath("backups", backupName)
-
-	// Delete the on-disk data
-	if shared.PathExists(backupPath) {
-		err := os.RemoveAll(backupPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Check if we can remove the container directory
-	backupsPath := shared.VarPath("backups", containerName)
-	empty, _ := shared.PathIsEmpty(backupsPath)
-	if empty {
-		err := os.Remove(backupsPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Remove the database record
-	err := s.Cluster.ContainerBackupRemove(backupName)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}

From 1ebf13f7d4b976169a1fac0b6220b46fab981a30 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:07:36 +0100
Subject: [PATCH 66/72] lxd/backup: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/backup.go | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/lxd/backup.go b/lxd/backup.go
index bc8b409fe8..d225cc6b9d 100644
--- a/lxd/backup.go
+++ b/lxd/backup.go
@@ -7,7 +7,6 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
-	"strings"
 	"time"
 
 	"context"
@@ -15,17 +14,18 @@ import (
 
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/task"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
 	log "github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/pkg/errors"
 )
 
 // Create a new backup
-func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer Instance) error {
+func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer instance.Instance) error {
 	// Create the database entry
 	err := s.Cluster.ContainerBackupCreate(args)
 	if err != nil {
@@ -37,7 +37,7 @@ func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer I
 	}
 
 	// Get the backup struct
-	b, err := backupLoadByName(s, sourceContainer.Project(), args.Name)
+	b, err := instance.BackupLoadByName(s, sourceContainer.Project(), args.Name)
 	if err != nil {
 		return errors.Wrap(err, "Load backup object")
 	}
@@ -52,9 +52,9 @@ func backupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer I
 	return nil
 }
 
-func backupGetInfo(r io.ReadSeeker) (*backupInfo, error) {
+func backupGetInfo(r io.ReadSeeker) (*instance.BackupInfo, error) {
 	var tr *tar.Reader
-	result := backupInfo{}
+	result := instance.BackupInfo{}
 	hasBinaryFormat := false
 	hasIndexFile := false
 
@@ -125,7 +125,7 @@ func backupGetInfo(r io.ReadSeeker) (*backupInfo, error) {
 // backupFixStoragePool changes the pool information in the backup.yaml. This
 // is done only if the provided pool doesn't exist. In this case, the pool of
 // the default profile will be used.
-func backupFixStoragePool(c *db.Cluster, b backupInfo, useDefaultPool bool) error {
+func backupFixStoragePool(c *db.Cluster, b instance.BackupInfo, useDefaultPool bool) error {
 	var poolName string
 
 	if useDefaultPool {
@@ -216,23 +216,23 @@ func backupFixStoragePool(c *db.Cluster, b backupInfo, useDefaultPool bool) erro
 	return nil
 }
 
-func backupCreateTarball(s *state.State, path string, backup backup) error {
+func backupCreateTarball(s *state.State, path string, backup instance.Backup) error {
 	// Create the index
-	pool, err := backup.instance.StoragePool()
+	pool, err := backup.Instance.StoragePool()
 	if err != nil {
 		return err
 	}
 
-	indexFile := backupInfo{
-		Name:       backup.instance.Name(),
-		Backend:    backup.instance.Storage().GetStorageTypeName(),
-		Privileged: backup.instance.IsPrivileged(),
+	indexFile := instance.BackupInfo{
+		Name:       backup.Instance.Name(),
+		Backend:    backup.Instance.Storage().GetStorageTypeName(),
+		Privileged: backup.Instance.IsPrivileged(),
 		Pool:       pool,
 		Snapshots:  []string{},
 	}
 
-	if !backup.instanceOnly {
-		snaps, err := backup.instance.Snapshots()
+	if !backup.InstanceOnly {
+		snaps, err := backup.Instance.Snapshots()
 		if err != nil {
 			return err
 		}
@@ -260,7 +260,7 @@ func backupCreateTarball(s *state.State, path string, backup backup) error {
 	}
 
 	// Create the target path if needed
-	backupsPath := shared.VarPath("backups", backup.instance.Name())
+	backupsPath := shared.VarPath("backups", backup.Instance.Name())
 	if !shared.PathExists(backupsPath) {
 		err := os.MkdirAll(backupsPath, 0700)
 		if err != nil {
@@ -269,7 +269,7 @@ func backupCreateTarball(s *state.State, path string, backup backup) error {
 	}
 
 	// Create the tarball
-	backupPath := shared.VarPath("backups", backup.name)
+	backupPath := shared.VarPath("backups", backup.Name)
 	success := false
 	defer func() {
 		if success {
@@ -340,11 +340,11 @@ func backupCreateTarball(s *state.State, path string, backup backup) error {
 
 func pruneExpiredContainerBackupsTask(d *Daemon) (task.Func, task.Schedule) {
 	f := func(ctx context.Context) {
-		opRun := func(op *operation) error {
+		opRun := func(op *operation.Operation) error {
 			return pruneExpiredContainerBackups(ctx, d)
 		}
 
-		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationBackupsExpire, nil, nil, opRun, nil, nil)
+		op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationBackupsExpire, nil, nil, opRun, nil, nil)
 		if err != nil {
 			logger.Error("Failed to start expired backups operation", log.Ctx{"err": err})
 			return
@@ -384,7 +384,7 @@ func pruneExpiredContainerBackups(ctx context.Context, d *Daemon) error {
 
 	for _, backup := range backups {
 		containerName, _, _ := shared.ContainerGetParentAndSnapshotName(backup)
-		err := doBackupDelete(d.State(), backup, containerName)
+		err := instance.DoBackupDelete(d.State(), backup, containerName)
 		if err != nil {
 			return errors.Wrapf(err, "Error deleting container backup %s", backup)
 		}

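With the struct moved, backups are consumed through exported fields on instance.Backup. A sketch of loading and inspecting one, using only names visible in the diff (s, projectName and backupName are assumed):

    b, err := instance.BackupLoadByName(s, projectName, backupName)
    if err != nil {
        return errors.Wrap(err, "Load backup object")
    }

    // Previously unexported: b.name, b.instance, b.instanceOnly.
    logger.Infof("Backup %s of instance %s (instance only: %v)",
        b.Name, b.Instance.Name(), b.InstanceOnly)
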
From a7fe47fbd0f8a9f8f89180e6a92a1d0c526533d6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:07:51 +0100
Subject: [PATCH 67/72] lxd/certificates: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/certificates.go | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/lxd/certificates.go b/lxd/certificates.go
index 463c04a9d2..86d2e481c6 100644
--- a/lxd/certificates.go
+++ b/lxd/certificates.go
@@ -14,6 +14,7 @@ import (
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -40,7 +41,7 @@ var certificateCmd = APIEndpoint{
 	Put:    APIEndpointAction{Handler: certificatePut},
 }
 
-func certificatesGet(d *Daemon, r *http.Request) Response {
+func certificatesGet(d *Daemon, r *http.Request) daemon.Response {
 	recursion := util.IsRecursionRequest(r)
 
 	if recursion {
@@ -100,7 +101,7 @@ func readSavedClientCAList(d *Daemon) {
 	}
 }
 
-func certificatesPost(d *Daemon, r *http.Request) Response {
+func certificatesPost(d *Daemon, r *http.Request) daemon.Response {
 	// Parse the request
 	req := api.CertificatesPost{}
 	if err := shared.ReadToJSON(r.Body, &req); err != nil {
@@ -217,7 +218,7 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
 	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/certificates/%s", version.APIVersion, fingerprint))
 }
 
-func certificateGet(d *Daemon, r *http.Request) Response {
+func certificateGet(d *Daemon, r *http.Request) daemon.Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
 	cert, err := doCertificateGet(d.cluster, fingerprint)
@@ -248,7 +249,7 @@ func doCertificateGet(db *db.Cluster, fingerprint string) (api.Certificate, erro
 	return resp, nil
 }
 
-func certificatePut(d *Daemon, r *http.Request) Response {
+func certificatePut(d *Daemon, r *http.Request) daemon.Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
 	oldEntry, err := doCertificateGet(d.cluster, fingerprint)
@@ -270,7 +271,7 @@ func certificatePut(d *Daemon, r *http.Request) Response {
 	return doCertificateUpdate(d, fingerprint, req)
 }
 
-func certificatePatch(d *Daemon, r *http.Request) Response {
+func certificatePatch(d *Daemon, r *http.Request) daemon.Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
 	oldEntry, err := doCertificateGet(d.cluster, fingerprint)
@@ -305,7 +306,7 @@ func certificatePatch(d *Daemon, r *http.Request) Response {
 	return doCertificateUpdate(d, fingerprint, req.Writable())
 }
 
-func doCertificateUpdate(d *Daemon, fingerprint string, req api.CertificatePut) Response {
+func doCertificateUpdate(d *Daemon, fingerprint string, req api.CertificatePut) daemon.Response {
 	if req.Type != "client" {
 		return BadRequest(fmt.Errorf("Unknown request type %s", req.Type))
 	}
@@ -318,7 +319,7 @@ func doCertificateUpdate(d *Daemon, fingerprint string, req api.CertificatePut)
 	return EmptySyncResponse
 }
 
-func certificateDelete(d *Daemon, r *http.Request) Response {
+func certificateDelete(d *Daemon, r *http.Request) daemon.Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
 	certInfo, err := d.cluster.CertificateGet(fingerprint)

From a0d806398b901c2ed1fcf9c952c7e35924abfd24 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:08:05 +0100
Subject: [PATCH 68/72] lxd/cluster/connect: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/cluster/connect.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxd/cluster/connect.go b/lxd/cluster/connect.go
index 6f26255894..43ec165eb6 100644
--- a/lxd/cluster/connect.go
+++ b/lxd/cluster/connect.go
@@ -7,7 +7,7 @@ import (
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/pkg/errors"
@@ -38,7 +38,7 @@ func Connect(address string, cert *shared.CertInfo, notify bool) (lxd.InstanceSe
 // running the container with the given name. If it's not the local node will
 // connect to it and return the connected client, otherwise it will just return
 // nil.
-func ConnectIfContainerIsRemote(cluster *db.Cluster, project, name string, cert *shared.CertInfo, instanceType instance.Type) (lxd.InstanceServer, error) {
+func ConnectIfContainerIsRemote(cluster *db.Cluster, project, name string, cert *shared.CertInfo, instanceType instancetype.Type) (lxd.InstanceServer, error) {
 	var address string // Node address
 	err := cluster.Transaction(func(tx *db.ClusterTx) error {
 		var err error

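Call sites are unchanged apart from the type of the final argument; a sketch, where cert is assumed to be the cluster's *shared.CertInfo obtained by the caller:

    client, err := cluster.ConnectIfContainerIsRemote(d.cluster, projectName, name,
        cert, instancetype.Container)
    if err != nil {
        return err
    }
    if client == nil {
        // The container is hosted on this node; operate locally.
    }
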
From 4de09b6e4f6e34c1a4e8af33046050adc907cff8 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:09:36 +0100
Subject: [PATCH 69/72] lxd/container: Removes functions that have been moved
 to instance package

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 214 -----------------------------------------------
 1 file changed, 214 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 45c7687dba..5e106c3f65 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -79,139 +79,6 @@ func containerValidName(name string) error {
 	return nil
 }
 
-func containerValidConfigKey(os *sys.OS, key string, value string) error {
-	f, err := shared.ConfigKeyChecker(key)
-	if err != nil {
-		return err
-	}
-	if err = f(value); err != nil {
-		return err
-	}
-	if key == "raw.lxc" {
-		return lxcValidConfig(value)
-	}
-	if key == "security.syscalls.blacklist_compat" {
-		for _, arch := range os.Architectures {
-			if arch == osarch.ARCH_64BIT_INTEL_X86 ||
-				arch == osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN ||
-				arch == osarch.ARCH_64BIT_POWERPC_BIG_ENDIAN {
-				return nil
-			}
-		}
-		return fmt.Errorf("security.syscalls.blacklist_compat isn't supported on this architecture")
-	}
-	return nil
-}
-
-func allowedUnprivilegedOnlyMap(rawIdmap string) error {
-	rawMaps, err := parseRawIdmap(rawIdmap)
-	if err != nil {
-		return err
-	}
-
-	for _, ent := range rawMaps {
-		if ent.Hostid == 0 {
-			return fmt.Errorf("Cannot map root user into container as LXD was configured to only allow unprivileged containers")
-		}
-	}
-
-	return nil
-}
-
-func containerValidConfig(sysOS *sys.OS, config map[string]string, profile bool, expanded bool) error {
-	if config == nil {
-		return nil
-	}
-
-	for k, v := range config {
-		if profile && strings.HasPrefix(k, "volatile.") {
-			return fmt.Errorf("Volatile keys can only be set on containers")
-		}
-
-		if profile && strings.HasPrefix(k, "image.") {
-			return fmt.Errorf("Image keys can only be set on containers")
-		}
-
-		err := containerValidConfigKey(sysOS, k, v)
-		if err != nil {
-			return err
-		}
-	}
-
-	_, rawSeccomp := config["raw.seccomp"]
-	_, whitelist := config["security.syscalls.whitelist"]
-	_, blacklist := config["security.syscalls.blacklist"]
-	blacklistDefault := shared.IsTrue(config["security.syscalls.blacklist_default"])
-	blacklistCompat := shared.IsTrue(config["security.syscalls.blacklist_compat"])
-
-	if rawSeccomp && (whitelist || blacklist || blacklistDefault || blacklistCompat) {
-		return fmt.Errorf("raw.seccomp is mutually exclusive with security.syscalls*")
-	}
-
-	if whitelist && (blacklist || blacklistDefault || blacklistCompat) {
-		return fmt.Errorf("security.syscalls.whitelist is mutually exclusive with security.syscalls.blacklist*")
-	}
-
-	if expanded && (config["security.privileged"] == "" || !shared.IsTrue(config["security.privileged"])) && sysOS.IdmapSet == nil {
-		return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported")
-	}
-
-	unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
-	if shared.IsTrue(unprivOnly) {
-		if config["raw.idmap"] != "" {
-			err := allowedUnprivilegedOnlyMap(config["raw.idmap"])
-			if err != nil {
-				return err
-			}
-		}
-
-		if shared.IsTrue(config["security.privileged"]) {
-			return fmt.Errorf("LXD was configured to only allow unprivileged containers")
-		}
-	}
-
-	return nil
-}
-
-// containerValidDevices validates container device configs.
-func containerValidDevices(state *state.State, cluster *db.Cluster, instanceName string, devices config.Devices, expanded bool) error {
-	// Empty device list
-	if devices == nil {
-		return nil
-	}
-
-	// Create a temporary containerLXC struct to use as an Instance in device validation.
-	// Populate its name, localDevices and expandedDevices properties based on the mode of
-	// validation being performed. In non-expanded validation, expensive checks should be avoided.
-	instance := &containerLXC{
-		name:         instanceName,
-		localDevices: devices.Clone(), // Prevent devices from modifying their config.
-	}
-
-	if expanded {
-		instance.expandedDevices = instance.localDevices // Avoid another clone.
-	}
-
-	// Check each device individually using the device package.
-	for name, config := range devices {
-		_, err := device.New(instance, state, name, config, nil, nil)
-		if err != nil {
-			return err
-		}
-
-	}
-
-	// Check we have a root disk if in expanded validation mode.
-	if expanded {
-		_, _, err := shared.GetRootDiskDevice(devices.CloneNative())
-		if err != nil {
-			return errors.Wrap(err, "Detect root disk device")
-		}
-	}
-
-	return nil
-}
-
 // The container interface
 type container interface {
 	Instance
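
The validators removed above resurface in the instance package; the next patch updates the call sites accordingly. As a reading aid, a hedged sketch of the mutual-exclusion rules containerValidConfig enforced, assuming instance.ContainerValidConfig keeps the same semantics:

    // raw.seccomp cannot be combined with any security.syscalls.* key,
    // and a whitelist cannot be combined with any blacklist key.
    cfg := map[string]string{
        "raw.seccomp":                 "...",
        "security.syscalls.whitelist": "mknod",
    }
    err := instance.ContainerValidConfig(s.OS, cfg, false, false)
    // err: "raw.seccomp is mutually exclusive with security.syscalls*"
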
@@ -938,62 +805,6 @@ func containerConfigureInternal(c Instance) error {
 	return nil
 }
 
-func instanceLoadById(s *state.State, id int) (Instance, error) {
-	// Get the DB record
-	project, name, err := s.Cluster.ContainerProjectAndName(id)
-	if err != nil {
-		return nil, err
-	}
-
-	return instanceLoadByProjectAndName(s, project, name)
-}
-
-func instanceLoadByProjectAndName(s *state.State, project, name string) (Instance, error) {
-	// Get the DB record
-	var container *db.Instance
-	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		var err error
-
-		if strings.Contains(name, shared.SnapshotDelimiter) {
-			parts := strings.SplitN(name, shared.SnapshotDelimiter, 2)
-			instanceName := parts[0]
-			snapshotName := parts[1]
-
-			instance, err := tx.InstanceGet(project, instanceName)
-			if err != nil {
-				return errors.Wrapf(err, "Failed to fetch instance %q in project %q", name, project)
-			}
-
-			snapshot, err := tx.InstanceSnapshotGet(project, instanceName, snapshotName)
-			if err != nil {
-				return errors.Wrapf(err, "Failed to fetch snapshot %q of instance %q in project %q", snapshotName, instanceName, project)
-			}
-
-			c := db.InstanceSnapshotToInstance(instance, snapshot)
-			container = &c
-		} else {
-			container, err = tx.InstanceGet(project, name)
-			if err != nil {
-				return errors.Wrapf(err, "Failed to fetch container %q in project %q", name, project)
-			}
-		}
-
-		return nil
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	args := db.ContainerToArgs(container)
-
-	c, err := containerLXCLoad(s, args, nil)
-	if err != nil {
-		return nil, errors.Wrap(err, "Failed to load container")
-	}
-
-	return c, nil
-}
-
 func instanceLoadByProject(s *state.State, project string) ([]Instance, error) {
 	// Get all the containers
 	var cts []db.Instance
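
The snapshot branch of the removed loader is the one piece of non-obvious logic here: instance and snapshot rows live in separate DB tables, so a name such as "c1/snap0" is split on shared.SnapshotDelimiter and the two records are joined back together. A minimal sketch of that resolution, using only helpers shown above:

    parts := strings.SplitN("c1/snap0", shared.SnapshotDelimiter, 2)
    instanceName, snapshotName := parts[0], parts[1] // "c1", "snap0"
    // tx.InstanceGet and tx.InstanceSnapshotGet fetch the two rows, and
    // db.InstanceSnapshotToInstance recombines them into one db.Instance.
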
@@ -1042,31 +853,6 @@ func instanceLoadFromAllProjects(s *state.State) ([]Instance, error) {
 	return instances, nil
 }
 
-// Legacy interface.
-func instanceLoadAll(s *state.State) ([]Instance, error) {
-	return instanceLoadByProject(s, "default")
-}
-
-// Load all instances on this node.
-func instanceLoadNodeAll(s *state.State) ([]Instance, error) {
-	// Get all the container arguments
-	var cts []db.Instance
-	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		var err error
-		cts, err = tx.ContainerNodeList()
-		if err != nil {
-			return err
-		}
-
-		return nil
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	return instanceLoadAllInternal(cts, s)
-}
-
 // Load all instances on this node under the given project.
 func instanceLoadNodeProjectAll(s *state.State, project string, instanceType instance.Type) ([]Instance, error) {
 	// Get all the container arguments

From e868c18bb63ebe670b57ec2a36e04b1c32b11bee Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:10:00 +0100
Subject: [PATCH 70/72] lxd/container: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 106 +++++++++++++++++++++++------------------------
 1 file changed, 53 insertions(+), 53 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 5e106c3f65..94631464af 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -21,8 +21,9 @@ import (
 	"github.com/lxc/lxd/lxd/device"
 	"github.com/lxc/lxd/lxd/device/config"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/state"
-	"github.com/lxc/lxd/lxd/sys"
 	"github.com/lxc/lxd/lxd/task"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -38,7 +39,7 @@ func init() {
 	// Expose instanceLoadNodeAll to the device package, converting the response to a slice of InstanceIdentifiers.
 	// This is because container types are defined in the main package and are not importable.
 	device.InstanceLoadNodeAll = func(s *state.State) ([]device.InstanceIdentifier, error) {
-		containers, err := instanceLoadNodeAll(s)
+		containers, err := instance.InstanceLoadNodeAll(s)
 		if err != nil {
 			return nil, err
 		}
@@ -51,10 +52,10 @@ func init() {
 		return identifiers, nil
 	}
 
-	// Expose instanceLoadByProjectAndName to the device package converting the response to an InstanceIdentifier.
+	// Expose instance.InstanceLoadByProjectAndName to the device package, converting the response to an InstanceIdentifier.
 	// This is because container types are defined in the main package and are not importable.
 	device.InstanceLoadByProjectAndName = func(s *state.State, project, name string) (device.InstanceIdentifier, error) {
-		container, err := instanceLoadByProjectAndName(s, project, name)
+		container, err := instance.InstanceLoadByProjectAndName(s, project, name)
 		if err != nil {
 			return nil, err
 		}
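
The two init() assignments above follow a simple dependency-inversion pattern to break the import cycle the comments describe: the device package declares package-level function variables against a narrow interface, and the main package fills them in at startup. A hedged sketch of the device side, with names taken from this diff (the full InstanceIdentifier interface is not shown in the series, so its methods are assumptions):

    package device

    import "github.com/lxc/lxd/lxd/state"

    // InstanceIdentifier is the narrow view of an instance that device
    // code needs; methods assumed for illustration.
    type InstanceIdentifier interface {
        Name() string
        Project() string
    }

    // Assigned by the main package's init() to avoid importing it here.
    var InstanceLoadNodeAll func(s *state.State) ([]InstanceIdentifier, error)
    var InstanceLoadByProjectAndName func(s *state.State, project, name string) (InstanceIdentifier, error)
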
@@ -81,12 +82,12 @@ func containerValidName(name string) error {
 
 // The container interface
 type container interface {
-	Instance
+	instance.Instance
 
 	/* actionScript here is a script called action.sh in the stateDir, to
 	 * be passed to CRIU as --action-script
 	 */
-	Migrate(args *CriuMigrationArgs) error
+	Migrate(args *instance.CriuMigrationArgs) error
 
 	ConsoleLog(opts lxc.ConsoleLogOptions) (string, error)
 
@@ -130,9 +131,8 @@ func containerCreateAsEmpty(d *Daemon, args db.ContainerArgs) (container, error)
 	return c, nil
 }
 
-func containerCreateFromBackup(s *state.State, info backupInfo, data io.ReadSeeker,
-	customPool bool) (storage, error) {
-	var pool storage
+func containerCreateFromBackup(s *state.State, info instance.BackupInfo, data io.ReadSeeker, customPool bool) (instance.Storage, error) {
+	var pool instance.Storage
 	var fixBackupFile = false
 
 	// Get storage pool from index.yaml
@@ -220,7 +220,7 @@ func containerCreateFromImage(d *Daemon, args db.ContainerArgs, hash string, tra
 	}
 
 	// Validate the type of the image matches the type of the instance.
-	imgType, err := instance.New(img.Type)
+	imgType, err := instancetype.New(img.Type)
 	if err != nil {
 		return nil, err
 	}
@@ -295,13 +295,13 @@ func containerCreateFromImage(d *Daemon, args db.ContainerArgs, hash string, tra
 	return c, nil
 }
 
-func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContainer Instance, containerOnly bool, refresh bool) (Instance, error) {
-	var ct Instance
+func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContainer instance.Instance, containerOnly bool, refresh bool) (instance.Instance, error) {
+	var ct instance.Instance
 	var err error
 
 	if refresh {
 		// Load the target container
-		ct, err = instanceLoadByProjectAndName(s, args.Project, args.Name)
+		ct, err = instance.InstanceLoadByProjectAndName(s, args.Project, args.Name)
 		if err != nil {
 			refresh = false
 		}
@@ -330,7 +330,7 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	}
 
 	csList := []*container{}
-	var snapshots []Instance
+	var snapshots []instance.Instance
 
 	if !containerOnly {
 		if refresh {
@@ -464,8 +464,8 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	return ct, nil
 }
 
-func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceInstance Instance) (Instance, error) {
-	if sourceInstance.Type() != instance.TypeContainer {
+func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceInstance instance.Instance) (instance.Instance, error) {
+	if sourceInstance.Type() != instancetype.Container {
 		return nil, fmt.Errorf("Instance not container type")
 	}
 
@@ -496,14 +496,14 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceInst
 		 * after snapshotting will fail.
 		 */
 
-		criuMigrationArgs := CriuMigrationArgs{
-			cmd:          lxc.MIGRATE_DUMP,
-			stateDir:     stateDir,
-			function:     "snapshot",
-			stop:         false,
-			actionScript: false,
-			dumpDir:      "",
-			preDumpDir:   "",
+		criuMigrationArgs := instance.CriuMigrationArgs{
+			Cmd:          lxc.MIGRATE_DUMP,
+			StateDir:     stateDir,
+			Function:     "snapshot",
+			Stop:         false,
+			ActionScript: false,
+			DumpDir:      "",
+			PreDumpDir:   "",
 		}
 
 		c := sourceInstance.(container)
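
Note the systematic case change in the struct literal: once CriuMigrationArgs moves out of the main package, its fields have to be exported for callers to set them. The struct as implied by the hunk above, with field types assumed where the diff does not show them:

    type CriuMigrationArgs struct {
        Cmd          uint // one of the lxc.MIGRATE_* constants; exact type assumed
        StateDir     string
        Function     string
        Stop         bool
        ActionScript bool
        DumpDir      string
        PreDumpDir   string
    }
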
@@ -537,7 +537,7 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceInst
 		defer sourceInstance.StorageStop()
 	}
 
-	err = writeBackupFile(sourceInstance)
+	err = instance.WriteBackupFile(sourceInstance)
 	if err != nil {
 		c.Delete()
 		return nil, err
@@ -595,13 +595,13 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Validate container config
-	err := containerValidConfig(s.OS, args.Config, false, false)
+	err := instance.ContainerValidConfig(s.OS, args.Config, false, false)
 	if err != nil {
 		return nil, err
 	}
 
 	// Validate container devices with the supplied container name and devices.
-	err = containerValidDevices(s, s.Cluster, args.Name, args.Devices, false)
+	err = instance.ContainerValidDevices(s, s.Cluster, args.Name, args.Devices, false)
 	if err != nil {
 		return nil, errors.Wrap(err, "Invalid devices")
 	}
@@ -750,7 +750,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	args = db.ContainerToArgs(&container)
 
 	// Set up the container struct and finish creation (storage and idmap)
-	c, err := containerLXCCreate(s, args)
+	c, err := instance.ContainerLXCCreate(s, args)
 	if err != nil {
 		s.Cluster.ContainerRemove(args.Project, args.Name)
 		return nil, errors.Wrap(err, "Create LXC container")
@@ -759,7 +759,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	return c, nil
 }
 
-func containerConfigureInternal(c Instance) error {
+func containerConfigureInternal(c instance.Instance) error {
 	// Find the root device
 	_, rootDiskDevice, err := shared.GetRootDiskDevice(c.ExpandedDevices().CloneNative())
 	if err != nil {
@@ -797,7 +797,7 @@ func containerConfigureInternal(c Instance) error {
 		defer c.StorageStop()
 	}
 
-	err = writeBackupFile(c)
+	err = instance.WriteBackupFile(c)
 	if err != nil {
 		return err
 	}
@@ -805,13 +805,13 @@ func containerConfigureInternal(c Instance) error {
 	return nil
 }
 
-func instanceLoadByProject(s *state.State, project string) ([]Instance, error) {
+func instanceLoadByProject(s *state.State, project string) ([]instance.Instance, error) {
 	// Get all the containers
 	var cts []db.Instance
 	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		filter := db.InstanceFilter{
 			Project: project,
-			Type:    instance.TypeContainer,
+			Type:    instancetype.Container,
 		}
 		var err error
 		cts, err = tx.InstanceList(filter)
@@ -829,7 +829,7 @@ func instanceLoadByProject(s *state.State, project string) ([]Instance, error) {
 }
 
 // Load all instances across all projects.
-func instanceLoadFromAllProjects(s *state.State) ([]Instance, error) {
+func instanceLoadFromAllProjects(s *state.State) ([]instance.Instance, error) {
 	var projects []string
 
 	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -841,7 +841,7 @@ func instanceLoadFromAllProjects(s *state.State) ([]Instance, error) {
 		return nil, err
 	}
 
-	instances := []Instance{}
+	instances := []instance.Instance{}
 	for _, project := range projects {
 		projectInstances, err := instanceLoadByProject(s, project)
 		if err != nil {
@@ -854,7 +854,7 @@ func instanceLoadFromAllProjects(s *state.State) ([]Instance, error) {
 }
 
 // Load all instances on this node under the given project.
-func instanceLoadNodeProjectAll(s *state.State, project string, instanceType instance.Type) ([]Instance, error) {
+func instanceLoadNodeProjectAll(s *state.State, project string, instanceType instancetype.Type) ([]instance.Instance, error) {
 	// Get all the container arguments
 	var cts []db.Instance
 	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
@@ -873,7 +873,7 @@ func instanceLoadNodeProjectAll(s *state.State, project string, instanceType ins
 	return instanceLoadAllInternal(cts, s)
 }
 
-func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Instance, error) {
+func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]instance.Instance, error) {
 	// Figure out what profiles are in use
 	profiles := map[string]map[string]api.Profile{}
 	for _, instArgs := range dbInstances {
@@ -903,7 +903,7 @@ func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Insta
 	}
 
 	// Load the instance structs
-	instances := []Instance{}
+	instances := []instance.Instance{}
 	for _, dbInstance := range dbInstances {
 		// Figure out the instance's profiles
 		cProfiles := []api.Profile{}
@@ -911,9 +911,9 @@ func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Insta
 			cProfiles = append(cProfiles, profiles[dbInstance.Project][name])
 		}
 
-		if dbInstance.Type == instance.TypeContainer {
+		if dbInstance.Type == instancetype.Container {
 			args := db.ContainerToArgs(&dbInstance)
-			ct, err := containerLXCLoad(s, args, cProfiles)
+			ct, err := instance.ContainerLXCLoad(s, args, cProfiles)
 			if err != nil {
 				return nil, err
 			}
@@ -928,7 +928,7 @@ func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Insta
 	return instances, nil
 }
 
-func containerCompareSnapshots(source Instance, target Instance) ([]Instance, []Instance, error) {
+func containerCompareSnapshots(source instance.Instance, target instance.Instance) ([]instance.Instance, []instance.Instance, error) {
 	// Get the source snapshots
 	sourceSnapshots, err := source.Snapshots()
 	if err != nil {
@@ -945,8 +945,8 @@ func containerCompareSnapshots(source Instance, target Instance) ([]Instance, []
 	sourceSnapshotsTime := map[string]time.Time{}
 	targetSnapshotsTime := map[string]time.Time{}
 
-	toDelete := []Instance{}
-	toSync := []Instance{}
+	toDelete := []instance.Instance{}
+	toSync := []instance.Instance{}
 
 	for _, snap := range sourceSnapshots {
 		_, snapName, _ := shared.ContainerGetParentAndSnapshotName(snap.Name())
@@ -981,14 +981,14 @@ func containerCompareSnapshots(source Instance, target Instance) ([]Instance, []
 func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
 	f := func(ctx context.Context) {
 		// Load all local instances
-		allContainers, err := instanceLoadNodeAll(d.State())
+		allContainers, err := instance.InstanceLoadNodeAll(d.State())
 		if err != nil {
 			logger.Error("Failed to load containers for scheduled snapshots", log.Ctx{"err": err})
 			return
 		}
 
 		// Figure out which need snapshotting (if any)
-		instances := []Instance{}
+		instances := []instance.Instance{}
 		for _, c := range allContainers {
 			schedule := c.ExpandedConfig()["snapshots.schedule"]
 
@@ -1034,11 +1034,11 @@ func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
 			return
 		}
 
-		opRun := func(op *operation) error {
+		opRun := func(op *operation.Operation) error {
 			return autoCreateContainerSnapshots(ctx, d, instances)
 		}
 
-		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationSnapshotCreate, nil, nil, opRun, nil, nil)
+		op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationSnapshotCreate, nil, nil, opRun, nil, nil)
 		if err != nil {
 			logger.Error("Failed to start create snapshot operation", log.Ctx{"err": err})
 			return
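
The same renaming recurs in every handler below: operationCreate and its class constants become operation.OperationCreate and operation.OperationClass*. Sketched once so the later hunks read mechanically (argument meanings inferred from the call sites; resources and metadata are nil for plain background tasks):

    opRun := func(op *operation.Operation) error {
        return doWork() // hypothetical task body
    }

    op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask,
        db.OperationSnapshotCreate, nil, nil, opRun, nil, nil)
    if err != nil {
        return err
    }
    // Starting the operation and waiting on it is elided in these hunks.
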
@@ -1069,7 +1069,7 @@ func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
 	return f, schedule
 }
 
-func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, instances []Instance) error {
+func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, instances []instance.Instance) error {
 	// Make the snapshots
 	for _, c := range instances {
 		ch := make(chan error)
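
Each iteration of the loop above funnels the snapshot through a channel so the task can race the work against context cancellation. A minimal sketch of the select pattern, assuming the elided body has the usual shape:

    ch := make(chan error)
    go func() {
        ch <- createSnapshot(c) // hypothetical per-instance body
    }()

    select {
    case <-ctx.Done():
        return nil // task cancelled; stop early
    case err := <-ch:
        if err != nil {
            logger.Error("Error creating snapshot", log.Ctx{"err": err})
        }
    }
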
@@ -1124,14 +1124,14 @@ func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, instances []In
 func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
 	f := func(ctx context.Context) {
 		// Load all local instances
-		allInstances, err := instanceLoadNodeAll(d.State())
+		allInstances, err := instance.InstanceLoadNodeAll(d.State())
 		if err != nil {
 			logger.Error("Failed to load instances for snapshot expiry", log.Ctx{"err": err})
 			return
 		}
 
 		// Figure out which snapshots have expired (if any)
-		expiredSnapshots := []Instance{}
+		expiredSnapshots := []instance.Instance{}
 		for _, c := range allInstances {
 			snapshots, err := c.Snapshots()
 			if err != nil {
@@ -1155,11 +1155,11 @@ func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
 			return
 		}
 
-		opRun := func(op *operation) error {
+		opRun := func(op *operation.Operation) error {
 			return pruneExpiredContainerSnapshots(ctx, d, expiredSnapshots)
 		}
 
-		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationSnapshotsExpire, nil, nil, opRun, nil, nil)
+		op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationSnapshotsExpire, nil, nil, opRun, nil, nil)
 		if err != nil {
 			logger.Error("Failed to start expired snapshots operation", log.Ctx{"err": err})
 			return
@@ -1190,7 +1190,7 @@ func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
 	return f, schedule
 }
 
-func pruneExpiredContainerSnapshots(ctx context.Context, d *Daemon, snapshots []Instance) error {
+func pruneExpiredContainerSnapshots(ctx context.Context, d *Daemon, snapshots []instance.Instance) error {
 	// Find snapshots to delete
 	for _, snapshot := range snapshots {
 		err := snapshot.Delete()
@@ -1202,7 +1202,7 @@ func pruneExpiredContainerSnapshots(ctx context.Context, d *Daemon, snapshots []
 	return nil
 }
 
-func containerDetermineNextSnapshotName(d *Daemon, c Instance, defaultPattern string) (string, error) {
+func containerDetermineNextSnapshotName(d *Daemon, c instance.Instance, defaultPattern string) (string, error) {
 	var err error
 
 	pattern := c.ExpandedConfig()["snapshots.pattern"]

From 72ea6ddb5a0dc9c1a45cb075a9090fe558e4589a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:12:52 +0100
Subject: [PATCH 71/72] lxd/containers: Removes containerDeleteSnapshots as it
 has moved to the instance package

- Renamed to instanceDeleteSnapshots.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/containers.go | 26 --------------------------
 1 file changed, 26 deletions(-)

diff --git a/lxd/containers.go b/lxd/containers.go
index a340642a00..10689e7001 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -350,29 +350,3 @@ func containersShutdown(s *state.State) error {
 
 	return nil
 }
-
-func containerDeleteSnapshots(s *state.State, project, cname string) error {
-	results, err := s.Cluster.ContainerGetSnapshots(project, cname)
-	if err != nil {
-		return err
-	}
-
-	for _, sname := range results {
-		sc, err := instanceLoadByProjectAndName(s, project, sname)
-		if err != nil {
-			logger.Error(
-				"containerDeleteSnapshots: Failed to load the snapshot container",
-				log.Ctx{"container": cname, "snapshot": sname, "err": err})
-
-			continue
-		}
-
-		if err := sc.Delete(); err != nil {
-			logger.Error(
-				"containerDeleteSnapshots: Failed to delete a snapshot container",
-				log.Ctx{"container": cname, "snapshot": sname, "err": err})
-		}
-	}
-
-	return nil
-}

From c1f4e4b81dd68baf59066cccc8f89967d7f06c3f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 25 Sep 2019 12:14:11 +0100
Subject: [PATCH 72/72] lxd/container*: Updates use of moved types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_backup.go         | 47 ++++++++++++----------
 lxd/container_console.go        | 29 ++++++++------
 lxd/container_delete.go         | 11 +++--
 lxd/container_exec.go           | 25 +++++++-----
 lxd/container_file.go           | 12 +++---
 lxd/container_get.go            |  7 +++-
 lxd/container_instance_types.go |  5 ++-
 lxd/container_logs.go           |  7 ++--
 lxd/container_metadata.go       | 24 ++++++-----
 lxd/container_patch.go          |  6 ++-
 lxd/container_post.go           | 35 ++++++++--------
 lxd/container_put.go            | 19 +++++----
 lxd/container_snapshot.go       | 51 ++++++++++++-----------
 lxd/container_state.go          | 29 ++++++++------
 lxd/container_test.go           | 51 +++++++++++------------
 lxd/containers.go               | 19 +++++----
 lxd/containers_get.go           | 20 +++++-----
 lxd/containers_post.go          | 71 +++++++++++++++++----------------
 18 files changed, 251 insertions(+), 217 deletions(-)

diff --git a/lxd/container_backup.go b/lxd/container_backup.go
index 040e2f9443..2c2ca1f733 100644
--- a/lxd/container_backup.go
+++ b/lxd/container_backup.go
@@ -10,14 +10,17 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/pkg/errors"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/version"
 )
 
-func containerBackupsGet(d *Daemon, r *http.Request) Response {
+func containerBackupsGet(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -37,7 +40,7 @@ func containerBackupsGet(d *Daemon, r *http.Request) Response {
 
 	recursion := util.IsRecursionRequest(r)
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, cname)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, cname)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -53,7 +56,7 @@ func containerBackupsGet(d *Daemon, r *http.Request) Response {
 	for _, backup := range backups {
 		if !recursion {
 			url := fmt.Sprintf("/%s/containers/%s/backups/%s",
-				version.APIVersion, cname, strings.Split(backup.name, "/")[1])
+				version.APIVersion, cname, strings.Split(backup.Name, "/")[1])
 			resultString = append(resultString, url)
 		} else {
 			render := backup.Render()
@@ -68,7 +71,7 @@ func containerBackupsGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, resultMap)
 }
 
-func containerBackupsPost(d *Daemon, r *http.Request) Response {
+func containerBackupsPost(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -86,7 +89,7 @@ func containerBackupsPost(d *Daemon, r *http.Request) Response {
 		return response
 	}
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -129,11 +132,11 @@ func containerBackupsPost(d *Daemon, r *http.Request) Response {
 
 		for _, backup := range backups {
 			// Ignore backups whose names don't start with the base prefix
-			if !strings.HasPrefix(backup.name, base) {
+			if !strings.HasPrefix(backup.Name, base) {
 				continue
 			}
 
-			substr := backup.name[length:]
+			substr := backup.Name[length:]
 			var num int
 			count, err := fmt.Sscanf(substr, "%d", &num)
 			if err != nil || count != 1 {
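
When the request omits a backup name, the scan above derives the next default by finding the highest numeric suffix among existing backups that share the base prefix. A standalone sketch of that logic:

    base := "c1/backup"
    max := -1
    for _, b := range []string{"c1/backup0", "c1/backup2"} {
        var num int
        if n, err := fmt.Sscanf(b[len(base):], "%d", &num); err == nil && n == 1 && num > max {
            max = num
        }
    }
    next := fmt.Sprintf("backup%d", max+1) // "backup3"
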
@@ -155,7 +158,7 @@ func containerBackupsPost(d *Daemon, r *http.Request) Response {
 	fullName := name + shared.SnapshotDelimiter + req.Name
 	instanceOnly := req.InstanceOnly || req.ContainerOnly
 
-	backup := func(op *operation) error {
+	backup := func(op *operation.Operation) error {
 		args := db.ContainerBackupArgs{
 			Name:             fullName,
 			ContainerID:      c.Id(),
@@ -177,7 +180,7 @@ func containerBackupsPost(d *Daemon, r *http.Request) Response {
 	resources["containers"] = []string{name}
 	resources["backups"] = []string{req.Name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask,
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask,
 		db.OperationBackupCreate, resources, nil, backup, nil, nil)
 	if err != nil {
 		return InternalError(err)
@@ -186,7 +189,7 @@ func containerBackupsPost(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func containerBackupGet(d *Daemon, r *http.Request) Response {
+func containerBackupGet(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -206,7 +209,7 @@ func containerBackupGet(d *Daemon, r *http.Request) Response {
 	}
 
 	fullName := name + shared.SnapshotDelimiter + backupName
-	backup, err := backupLoadByName(d.State(), project, fullName)
+	backup, err := instance.BackupLoadByName(d.State(), project, fullName)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -214,7 +217,7 @@ func containerBackupGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, backup.Render())
 }
 
-func containerBackupPost(d *Daemon, r *http.Request) Response {
+func containerBackupPost(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -245,14 +248,14 @@ func containerBackupPost(d *Daemon, r *http.Request) Response {
 	}
 
 	oldName := name + shared.SnapshotDelimiter + backupName
-	backup, err := backupLoadByName(d.State(), project, oldName)
+	backup, err := instance.BackupLoadByName(d.State(), project, oldName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	newName := name + shared.SnapshotDelimiter + req.Name
 
-	rename := func(op *operation) error {
+	rename := func(op *operation.Operation) error {
 		err := backup.Rename(newName)
 		if err != nil {
 			return err
@@ -264,7 +267,7 @@ func containerBackupPost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask,
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask,
 		db.OperationBackupRename, resources, nil, rename, nil, nil)
 	if err != nil {
 		return InternalError(err)
@@ -273,7 +276,7 @@ func containerBackupPost(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func containerBackupDelete(d *Daemon, r *http.Request) Response {
+func containerBackupDelete(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -293,12 +296,12 @@ func containerBackupDelete(d *Daemon, r *http.Request) Response {
 	}
 
 	fullName := name + shared.SnapshotDelimiter + backupName
-	backup, err := backupLoadByName(d.State(), project, fullName)
+	backup, err := instance.BackupLoadByName(d.State(), project, fullName)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	remove := func(op *operation) error {
+	remove := func(op *operation.Operation) error {
 		err := backup.Delete()
 		if err != nil {
 			return err
@@ -310,7 +313,7 @@ func containerBackupDelete(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["container"] = []string{name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask,
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask,
 		db.OperationBackupRemove, resources, nil, remove, nil, nil)
 	if err != nil {
 		return InternalError(err)
@@ -319,7 +322,7 @@ func containerBackupDelete(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func containerBackupExportGet(d *Daemon, r *http.Request) Response {
+func containerBackupExportGet(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -339,13 +342,13 @@ func containerBackupExportGet(d *Daemon, r *http.Request) Response {
 	}
 
 	fullName := name + shared.SnapshotDelimiter + backupName
-	backup, err := backupLoadByName(d.State(), project, fullName)
+	backup, err := instance.BackupLoadByName(d.State(), project, fullName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	ent := fileResponseEntry{
-		path: shared.VarPath("backups", backup.name),
+		path: shared.VarPath("backups", backup.Name),
 	}
 
 	return FileResponse(r, []fileResponseEntry{ent}, nil, false)
diff --git a/lxd/container_console.go b/lxd/container_console.go
index 9aa73fea41..135d7fa63b 100644
--- a/lxd/container_console.go
+++ b/lxd/container_console.go
@@ -17,8 +17,11 @@ import (
 	"gopkg.in/lxc/go-lxc.v2"
 
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -27,7 +30,7 @@ import (
 
 type consoleWs struct {
 	// instance currently worked on
-	instance Instance
+	instance instance.Instance
 
 	// uid to chown pty to
 	rootUid int64
@@ -70,7 +73,7 @@ func (s *consoleWs) Metadata() interface{} {
 	return shared.Jmap{"fds": fds}
 }
 
-func (s *consoleWs) Connect(op *operation, r *http.Request, w http.ResponseWriter) error {
+func (s *consoleWs) Connect(op *operation.Operation, r *http.Request, w http.ResponseWriter) error {
 	secret := r.FormValue("secret")
 	if secret == "" {
 		return fmt.Errorf("missing secret")
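
The secret check above is the per-fd websocket handshake used by both the console and exec endpoints: Metadata() publishes one random secret per expected connection, and a client binds to a particular fd by echoing its secret back as a form value. The matching loop, sketched from the surrounding code:

    secret := r.FormValue("secret")
    for fd, fdSecret := range s.fds {
        if secret == fdSecret {
            // Upgrade this request to a websocket and attach it to fd.
            _ = fd
            return nil
        }
    }
    return os.ErrPermission // no fd matched this secret
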
@@ -111,7 +114,7 @@ func (s *consoleWs) Connect(op *operation, r *http.Request, w http.ResponseWrite
 	return os.ErrPermission
 }
 
-func (s *consoleWs) Do(op *operation) error {
+func (s *consoleWs) Do(op *operation.Operation) error {
 	<-s.allConnected
 
 	var err error
@@ -254,7 +257,7 @@ func (s *consoleWs) Do(op *operation) error {
 	return finisher(err)
 }
 
-func containerConsolePost(d *Daemon, r *http.Request) Response {
+func containerConsolePost(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -292,7 +295,7 @@ func containerConsolePost(d *Daemon, r *http.Request) Response {
 		return ForwardedOperationResponse(project, &opAPI)
 	}
 
-	inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+	inst, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -311,7 +314,7 @@ func containerConsolePost(d *Daemon, r *http.Request) Response {
 	ws.fds = map[int]string{}
 
 	// If the instance is a container, set up the root UID/GID for the web socket.
-	if inst.Type() == instance.TypeContainer {
+	if inst.Type() == instancetype.Container {
 		c := inst.(container)
 		idmapset, err := c.CurrentIdmap()
 		if err != nil {
@@ -342,7 +345,7 @@ func containerConsolePost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{ws.instance.Name()}
 
-	op, err := operationCreate(d.cluster, project, operationClassWebsocket, db.OperationConsoleShow,
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassWebsocket, db.OperationConsoleShow,
 		resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 	if err != nil {
 		return InternalError(err)
@@ -351,7 +354,7 @@ func containerConsolePost(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func containerConsoleLogGet(d *Daemon, r *http.Request) Response {
+func containerConsoleLogGet(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -373,12 +376,12 @@ func containerConsoleLogGet(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Querying the console buffer requires liblxc >= 3.0"))
 	}
 
-	inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+	inst, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	if inst.Type() != instance.TypeContainer {
+	if inst.Type() != instancetype.Container {
 		return SmartError(fmt.Errorf("Instance is not container type"))
 	}
 
@@ -419,7 +422,7 @@ func containerConsoleLogGet(d *Daemon, r *http.Request) Response {
 	return FileResponse(r, []fileResponseEntry{ent}, nil, false)
 }
 
-func containerConsoleLogDelete(d *Daemon, r *http.Request) Response {
+func containerConsoleLogDelete(d *Daemon, r *http.Request) daemon.Response {
 	if !util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
 		return BadRequest(fmt.Errorf("Clearing the console buffer requires liblxc >= 3.0"))
 	}
@@ -427,12 +430,12 @@ func containerConsoleLogDelete(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 	project := projectParam(r)
 
-	inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+	inst, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	if inst.Type() != instance.TypeContainer {
+	if inst.Type() != instancetype.Container {
 		return SmartError(fmt.Errorf("Instance is not container type"))
 	}
 
diff --git a/lxd/container_delete.go b/lxd/container_delete.go
index 2885588d1e..5a13d6a5ee 100644
--- a/lxd/container_delete.go
+++ b/lxd/container_delete.go
@@ -5,10 +5,13 @@ import (
 	"net/http"
 
 	"github.com/gorilla/mux"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 )
 
-func containerDelete(d *Daemon, r *http.Request) Response {
+func containerDelete(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -25,7 +28,7 @@ func containerDelete(d *Daemon, r *http.Request) Response {
 		return response
 	}
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -34,14 +37,14 @@ func containerDelete(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("container is running"))
 	}
 
-	rmct := func(op *operation) error {
+	rmct := func(op *operation.Operation) error {
 		return c.Delete()
 	}
 
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationContainerDelete, resources, nil, rmct, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationContainerDelete, resources, nil, rmct, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_exec.go b/lxd/container_exec.go
index 3c871fed37..0ef420e1c5 100644
--- a/lxd/container_exec.go
+++ b/lxd/container_exec.go
@@ -18,8 +18,11 @@ import (
 	"golang.org/x/sys/unix"
 
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	log "github.com/lxc/lxd/shared/log15"
@@ -30,7 +33,7 @@ import (
 
 type execWs struct {
 	command  []string
-	instance Instance
+	instance instance.Instance
 	env      map[string]string
 
 	rootUid          int64
@@ -66,7 +69,7 @@ func (s *execWs) Metadata() interface{} {
 	}
 }
 
-func (s *execWs) Connect(op *operation, r *http.Request, w http.ResponseWriter) error {
+func (s *execWs) Connect(op *operation.Operation, r *http.Request, w http.ResponseWriter) error {
 	secret := r.FormValue("secret")
 	if secret == "" {
 		return fmt.Errorf("missing secret")
@@ -107,7 +110,7 @@ func (s *execWs) Connect(op *operation, r *http.Request, w http.ResponseWriter)
 	return os.ErrPermission
 }
 
-func (s *execWs) Do(op *operation) error {
+func (s *execWs) Do(op *operation.Operation) error {
 	<-s.allConnected
 
 	var err error
@@ -341,7 +344,7 @@ func (s *execWs) Do(op *operation) error {
 	return finisher(-1, nil)
 }
 
-func containerExecPost(d *Daemon, r *http.Request) Response {
+func containerExecPost(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -378,7 +381,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 		return ForwardedOperationResponse(project, &opAPI)
 	}
 
-	inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+	inst, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -439,7 +442,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 		ws := &execWs{}
 		ws.fds = map[int]string{}
 
-		if inst.Type() == instance.TypeContainer {
+		if inst.Type() == instancetype.Container {
 			c := inst.(container)
 			idmapset, err := c.CurrentIdmap()
 			if err != nil {
@@ -482,7 +485,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 		resources := map[string][]string{}
 		resources["containers"] = []string{ws.instance.Name()}
 
-		op, err := operationCreate(d.cluster, project, operationClassWebsocket, db.OperationCommandExec, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+		op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassWebsocket, db.OperationCommandExec, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -490,20 +493,20 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 		return OperationResponse(op)
 	}
 
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		var cmdErr error
 		var cmdResult int
 		metadata := shared.Jmap{}
 
 		if post.RecordOutput {
 			// Prepare stdout and stderr recording
-			stdout, err := os.OpenFile(filepath.Join(inst.LogPath(), fmt.Sprintf("exec_%s.stdout", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+			stdout, err := os.OpenFile(filepath.Join(inst.LogPath(), fmt.Sprintf("exec_%s.stdout", op.ID)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
 			if err != nil {
 				return err
 			}
 			defer stdout.Close()
 
-			stderr, err := os.OpenFile(filepath.Join(inst.LogPath(), fmt.Sprintf("exec_%s.stderr", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+			stderr, err := os.OpenFile(filepath.Join(inst.LogPath(), fmt.Sprintf("exec_%s.stderr", op.ID)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
 			if err != nil {
 				return err
 			}
@@ -534,7 +537,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationCommandExec, resources, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationCommandExec, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_file.go b/lxd/container_file.go
index 7946fe4444..9b8e541037 100644
--- a/lxd/container_file.go
+++ b/lxd/container_file.go
@@ -10,10 +10,12 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/daemon"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/shared"
 )
 
-func containerFileHandler(d *Daemon, r *http.Request) Response {
+func containerFileHandler(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -30,7 +32,7 @@ func containerFileHandler(d *Daemon, r *http.Request) Response {
 		return response
 	}
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -52,7 +54,7 @@ func containerFileHandler(d *Daemon, r *http.Request) Response {
 	}
 }
 
-func containerFileGet(c Instance, path string, r *http.Request) Response {
+func containerFileGet(c instance.Instance, path string, r *http.Request) daemon.Response {
 	/*
 	 * Copy out of the ns to a temporary file, and then use that to serve
 	 * the request from. This prevents us from having to worry about stuff
@@ -97,7 +99,7 @@ func containerFileGet(c Instance, path string, r *http.Request) Response {
 	}
 }
 
-func containerFilePost(c Instance, path string, r *http.Request) Response {
+func containerFilePost(c instance.Instance, path string, r *http.Request) daemon.Response {
 	// Extract file ownership and mode from headers
 	uid, gid, mode, type_, write := shared.ParseLXDFileHeaders(r.Header)
 
@@ -150,7 +152,7 @@ func containerFilePost(c Instance, path string, r *http.Request) Response {
 	}
 }
 
-func containerFileDelete(c Instance, path string, r *http.Request) Response {
+func containerFileDelete(c instance.Instance, path string, r *http.Request) daemon.Response {
 	err := c.FileRemove(path)
 	if err != nil {
 		return SmartError(err)
diff --git a/lxd/container_get.go b/lxd/container_get.go
index b76a85baa0..6bca2ea864 100644
--- a/lxd/container_get.go
+++ b/lxd/container_get.go
@@ -4,9 +4,12 @@ import (
 	"net/http"
 
 	"github.com/gorilla/mux"
+
+	"github.com/lxc/lxd/lxd/daemon"
+	"github.com/lxc/lxd/lxd/instance"
 )
 
-func containerGet(d *Daemon, r *http.Request) Response {
+func containerGet(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -24,7 +27,7 @@ func containerGet(d *Daemon, r *http.Request) Response {
 		return response
 	}
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/container_instance_types.go b/lxd/container_instance_types.go
index 93beb809e0..a07a50c4dd 100644
--- a/lxd/container_instance_types.go
+++ b/lxd/container_instance_types.go
@@ -11,6 +11,7 @@ import (
 	"gopkg.in/yaml.v2"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/task"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -74,7 +75,7 @@ func instanceRefreshTypesTask(d *Daemon) (task.Func, task.Schedule) {
 	// returning in case the context expires.
 	_, hasCancellationSupport := interface{}(&http.Request{}).(util.ContextAwareRequest)
 	f := func(ctx context.Context) {
-		opRun := func(op *operation) error {
+		opRun := func(op *operation.Operation) error {
 			if hasCancellationSupport {
 				return instanceRefreshTypes(ctx, d)
 			}
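
The hasCancellationSupport line above is a small capability probe: a type assertion against *http.Request detects at runtime whether the standard library in use satisfies util.ContextAwareRequest, i.e. whether http.Request has a Context method. A hedged sketch of the probe, assuming ContextAwareRequest is the obvious one-method interface:

    // ContextAwareRequest is assumed to be:
    type ContextAwareRequest interface {
        Context() context.Context
    }

    _, ok := interface{}(&http.Request{}).(ContextAwareRequest)
    // ok is true whenever http.Request implements Context().
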
@@ -91,7 +92,7 @@ func instanceRefreshTypesTask(d *Daemon) (task.Func, task.Schedule) {
 			}
 		}
 
-		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationInstanceTypesUpdate, nil, nil, opRun, nil, nil)
+		op, err := operation.OperationCreate(d.cluster, "", operation.OperationClassTask, db.OperationInstanceTypesUpdate, nil, nil, opRun, nil, nil)
 		if err != nil {
 			logger.Error("Failed to start instance types update operation", log.Ctx{"err": err})
 			return
diff --git a/lxd/container_logs.go b/lxd/container_logs.go
index 8c68c47e67..f5044bde94 100644
--- a/lxd/container_logs.go
+++ b/lxd/container_logs.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/version"
 )
@@ -30,7 +31,7 @@ var instanceLogsCmd = APIEndpoint{
 	Get: APIEndpointAction{Handler: containerLogsGet, AccessHandler: AllowProjectPermission("containers", "view")},
 }
 
-func containerLogsGet(d *Daemon, r *http.Request) Response {
+func containerLogsGet(d *Daemon, r *http.Request) daemon.Response {
 	/* Let's explicitly *not* try to do a containerLoadByName here. In some
 	 * cases (e.g. when container creation failed), the container won't
 	 * exist in the DB but it does have some log files on disk.
@@ -89,7 +90,7 @@ func validLogFileName(fname string) bool {
 		strings.HasPrefix(fname, "exec_")
 }
 
-func containerLogGet(d *Daemon, r *http.Request) Response {
+func containerLogGet(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -125,7 +126,7 @@ func containerLogGet(d *Daemon, r *http.Request) Response {
 	return FileResponse(r, []fileResponseEntry{ent}, nil, false)
 }
 
-func containerLogDelete(d *Daemon, r *http.Request) Response {
+func containerLogDelete(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
diff --git a/lxd/container_metadata.go b/lxd/container_metadata.go
index bd4bdc712f..5adb55494d 100644
--- a/lxd/container_metadata.go
+++ b/lxd/container_metadata.go
@@ -13,11 +13,13 @@ import (
 	"github.com/gorilla/mux"
 	"gopkg.in/yaml.v2"
 
+	"github.com/lxc/lxd/lxd/daemon"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 )
 
-func containerMetadataGet(d *Daemon, r *http.Request) Response {
+func containerMetadataGet(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -36,7 +38,7 @@ func containerMetadataGet(d *Daemon, r *http.Request) Response {
 	}
 
 	// Load the container
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -78,7 +80,7 @@ func containerMetadataGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, metadata)
 }
 
-func containerMetadataPut(d *Daemon, r *http.Request) Response {
+func containerMetadataPut(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -97,7 +99,7 @@ func containerMetadataPut(d *Daemon, r *http.Request) Response {
 	}
 
 	// Load the container
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -132,7 +134,7 @@ func containerMetadataPut(d *Daemon, r *http.Request) Response {
 }
 
 // Return a list of templates used in a container or the content of a template
-func containerMetadataTemplatesGet(d *Daemon, r *http.Request) Response {
+func containerMetadataTemplatesGet(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -151,7 +153,7 @@ func containerMetadataTemplatesGet(d *Daemon, r *http.Request) Response {
 	}
 
 	// Load the container
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -226,7 +228,7 @@ func containerMetadataTemplatesGet(d *Daemon, r *http.Request) Response {
 }
 
 // Add a container template file
-func containerMetadataTemplatesPostPut(d *Daemon, r *http.Request) Response {
+func containerMetadataTemplatesPostPut(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -245,7 +247,7 @@ func containerMetadataTemplatesPostPut(d *Daemon, r *http.Request) Response {
 	}
 
 	// Load the container
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -298,7 +300,7 @@ func containerMetadataTemplatesPostPut(d *Daemon, r *http.Request) Response {
 }
 
 // Delete a container template
-func containerMetadataTemplatesDelete(d *Daemon, r *http.Request) Response {
+func containerMetadataTemplatesDelete(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -318,7 +320,7 @@ func containerMetadataTemplatesDelete(d *Daemon, r *http.Request) Response {
 	}
 
 	// Load the container
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -357,7 +359,7 @@ func containerMetadataTemplatesDelete(d *Daemon, r *http.Request) Response {
 }
 
 // Return the full path of a container template.
-func getContainerTemplatePath(c Instance, filename string) (string, error) {
+func getContainerTemplatePath(c instance.Instance, filename string) (string, error) {
 	if strings.Contains(filename, "/") {
 		return "", fmt.Errorf("Invalid template filename")
 	}
diff --git a/lxd/container_patch.go b/lxd/container_patch.go
index 16b8bf3743..6e3dafa862 100644
--- a/lxd/container_patch.go
+++ b/lxd/container_patch.go
@@ -9,15 +9,17 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/osarch"
 )
 
-func containerPatch(d *Daemon, r *http.Request) Response {
+func containerPatch(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -37,7 +39,7 @@ func containerPatch(d *Daemon, r *http.Request) Response {
 		return response
 	}
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return NotFound(err)
 	}
diff --git a/lxd/container_post.go b/lxd/container_post.go
index da7e960938..24c8ba7a23 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -13,8 +13,11 @@ import (
 
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/operation"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -27,7 +30,7 @@ var internalClusterContainerMovedCmd = APIEndpoint{
 	Post: APIEndpointAction{Handler: internalClusterContainerMovedPost},
 }
 
-func containerPost(d *Daemon, r *http.Request) Response {
+func containerPost(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -102,7 +105,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Target node is offline"))
 	}
 
-	var inst Instance
+	var inst instance.Instance
 
 	// Check whether to forward the request to the node that is running the
 	// container. Here are the possible cases:
@@ -135,7 +138,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 			return response
 		}
 
-		inst, err = instanceLoadByProjectAndName(d.State(), project, name)
+		inst, err = instance.InstanceLoadByProjectAndName(d.State(), project, name)
 		if err != nil {
 			return SmartError(err)
 		}
@@ -204,7 +207,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 
 		instanceOnly := req.InstanceOnly || req.ContainerOnly
 
-		if inst.Type() != instance.TypeContainer {
+		if inst.Type() != instancetype.Container {
 			return SmartError(fmt.Errorf("Instance is not container type"))
 		}
 
@@ -224,7 +227,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 				return InternalError(err)
 			}
 
-			op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationContainerMigrate, resources, nil, ws.Do, nil, nil)
+			op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationContainerMigrate, resources, nil, ws.Do, nil, nil)
 			if err != nil {
 				return InternalError(err)
 			}
@@ -233,7 +236,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 		}
 
 		// Pull mode
-		op, err := operationCreate(d.cluster, project, operationClassWebsocket, db.OperationContainerMigrate, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+		op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassWebsocket, db.OperationContainerMigrate, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -247,14 +250,14 @@ func containerPost(d *Daemon, r *http.Request) Response {
 		return Conflict(fmt.Errorf("Name '%s' already in use", req.Name))
 	}
 
-	run := func(*operation) error {
+	run := func(*operation.Operation) error {
 		return inst.Rename(req.Name)
 	}
 
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationContainerRename, resources, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationContainerRename, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -263,7 +266,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 }
 
 // Move a non-ceph container to another cluster node.
-func containerPostClusteringMigrate(d *Daemon, c Instance, oldName, newName, newNode string) Response {
+func containerPostClusteringMigrate(d *Daemon, c instance.Instance, oldName, newName, newNode string) daemon.Response {
 	cert := d.endpoints.NetworkCert()
 
 	var sourceAddress string
@@ -293,7 +296,7 @@ func containerPostClusteringMigrate(d *Daemon, c Instance, oldName, newName, new
 		return SmartError(err)
 	}
 
-	run := func(*operation) error {
+	run := func(*operation.Operation) error {
 		// Connect to the source host, i.e. ourselves (the node the container is running on).
 		source, err := cluster.Connect(sourceAddress, cert, false)
 		if err != nil {
@@ -397,7 +400,7 @@ func containerPostClusteringMigrate(d *Daemon, c Instance, oldName, newName, new
 
 	resources := map[string][]string{}
 	resources["containers"] = []string{oldName}
-	op, err := operationCreate(d.cluster, c.Project(), operationClassTask, db.OperationContainerMigrate, resources, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, c.Project(), operation.OperationClassTask, db.OperationContainerMigrate, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -406,8 +409,8 @@ func containerPostClusteringMigrate(d *Daemon, c Instance, oldName, newName, new
 }
 
 // Special case migrating a container backed by ceph across two cluster nodes.
-func containerPostClusteringMigrateWithCeph(d *Daemon, c Instance, project, oldName, newName, newNode string, instanceType instance.Type) Response {
-	run := func(*operation) error {
+func containerPostClusteringMigrateWithCeph(d *Daemon, c instance.Instance, project, oldName, newName, newNode string, instanceType instancetype.Type) daemon.Response {
+	run := func(*operation.Operation) error {
 		// If source node is online (i.e. we're serving the request on
 		// it, and c != nil), let's unmap the RBD volume locally
 		if c != nil {
@@ -505,7 +508,7 @@ func containerPostClusteringMigrateWithCeph(d *Daemon, c Instance, project, oldN
 
 	resources := map[string][]string{}
 	resources["containers"] = []string{oldName}
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationContainerMigrate, resources, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationContainerMigrate, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -517,7 +520,7 @@ func containerPostClusteringMigrateWithCeph(d *Daemon, c Instance, project, oldN
 //
 // At the moment it's used for ceph-based containers, where the target node needs
 // to create the appropriate mount points.
-func internalClusterContainerMovedPost(d *Daemon, r *http.Request) Response {
+func internalClusterContainerMovedPost(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	containerName := mux.Vars(r)["name"]
 	err := containerPostCreateContainerMountPoint(d, project, containerName)
@@ -530,7 +533,7 @@ func internalClusterContainerMovedPost(d *Daemon, r *http.Request) Response {
 // Used to create the appropriate mount points after a container has been
 // moved.
 func containerPostCreateContainerMountPoint(d *Daemon, project, containerName string) error {
-	c, err := instanceLoadByProjectAndName(d.State(), project, containerName)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, containerName)
 	if err != nil {
 		return errors.Wrap(err, "Failed to load moved container on target node")
 	}
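
For readers following the mechanical rename: the exported operation.OperationCreate keeps the argument order of the old private operationCreate. A rough gloss of the parameters, inferred from the call sites in this patch (the annotations are mine, not from the operation package):

    op, err := operation.OperationCreate(
        d.cluster,                    // *db.Cluster the operation is recorded in
        project,                      // project the operation belongs to
        operation.OperationClassTask, // class: task here, websocket for transfers
        db.OperationContainerRename,  // db.OperationType describing the action
        resources,                    // affected entities, e.g. {"containers": {name}}
        nil,                          // metadata; ws.Metadata() for websocket class
        run,                          // onRun: func(*operation.Operation) error
        nil,                          // presumably onCancel (always nil in these hunks)
        nil,                          // onConnect; ws.Connect for websocket class
    )
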
diff --git a/lxd/container_put.go b/lxd/container_put.go
index 93eb4132ca..c5aea8b977 100644
--- a/lxd/container_put.go
+++ b/lxd/container_put.go
@@ -7,8 +7,11 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -20,7 +23,7 @@ import (
  * Update configuration, or, if 'restore:snapshot-name' is present, restore
  * the named snapshot
  */
-func containerPut(d *Daemon, r *http.Request) Response {
+func containerPut(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -40,7 +43,7 @@ func containerPut(d *Daemon, r *http.Request) Response {
 		return response
 	}
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return NotFound(err)
 	}
@@ -62,11 +65,11 @@ func containerPut(d *Daemon, r *http.Request) Response {
 		architecture = 0
 	}
 
-	var do func(*operation) error
+	var do func(*operation.Operation) error
 	var opType db.OperationType
 	if configRaw.Restore == "" {
 		// Update container configuration
-		do = func(op *operation) error {
+		do = func(op *operation.Operation) error {
 			args := db.ContainerArgs{
 				Architecture: architecture,
 				Config:       configRaw.Config,
@@ -89,7 +92,7 @@ func containerPut(d *Daemon, r *http.Request) Response {
 		opType = db.OperationContainerUpdate
 	} else {
 		// Snapshot Restore
-		do = func(op *operation) error {
+		do = func(op *operation.Operation) error {
 			return containerSnapRestore(d.State(), project, name, configRaw.Restore, configRaw.Stateful)
 		}
 
@@ -99,7 +102,7 @@ func containerPut(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, opType, resources, nil, do, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, opType, resources, nil, do, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -113,12 +116,12 @@ func containerSnapRestore(s *state.State, project, name, snap string, stateful b
 		snap = name + shared.SnapshotDelimiter + snap
 	}
 
-	c, err := instanceLoadByProjectAndName(s, project, name)
+	c, err := instance.InstanceLoadByProjectAndName(s, project, name)
 	if err != nil {
 		return err
 	}
 
-	source, err := instanceLoadByProjectAndName(s, project, snap)
+	source, err := instance.InstanceLoadByProjectAndName(s, project, snap)
 	if err != nil {
 		switch err {
 		case db.ErrNoSuchObject:
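
A self-contained illustration of the name handling in containerSnapRestore above: a bare snapshot name is qualified with shared.SnapshotDelimiter (assumed to be "/") before the lookup. This is a toy stand-in, not code from the tree:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // Assumed value of shared.SnapshotDelimiter.
    const snapshotDelimiter = "/"

    // qualify mirrors the check at the top of containerSnapRestore: a bare
    // snapshot name gets prefixed with its parent container's name.
    func qualify(name, snap string) string {
    	if !strings.Contains(snap, snapshotDelimiter) {
    		return name + snapshotDelimiter + snap
    	}
    	return snap
    }

    func main() {
    	fmt.Println(qualify("c1", "snap0"))    // c1/snap0
    	fmt.Println(qualify("c1", "c1/snap0")) // already qualified, unchanged
    }
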
diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index 51629b4d51..86508f20a1 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -12,15 +12,18 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/version"
 )
 
-func containerSnapshotsGet(d *Daemon, r *http.Request) Response {
+func containerSnapshotsGet(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -59,7 +62,7 @@ func containerSnapshotsGet(d *Daemon, r *http.Request) Response {
 			}
 		}
 	} else {
-		c, err := instanceLoadByProjectAndName(d.State(), project, cname)
+		c, err := instance.InstanceLoadByProjectAndName(d.State(), project, cname)
 		if err != nil {
 			return SmartError(err)
 		}
@@ -86,7 +89,7 @@ func containerSnapshotsGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, resultMap)
 }
 
-func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
+func containerSnapshotsPost(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -110,7 +113,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 	 * 2. copy the database info over
 	 * 3. copy over the rootfs
 	 */
-	inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+	inst, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -146,7 +149,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	snapshot := func(op *operation) error {
+	snapshot := func(op *operation.Operation) error {
 		args := db.ContainerArgs{
 			Project:      inst.Project(),
 			Architecture: inst.Architecture(),
@@ -161,7 +164,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 			ExpiryDate:   expiry,
 		}
 
-		if inst.Type() != instance.TypeContainer {
+		if inst.Type() != instancetype.Container {
 			return fmt.Errorf("Instance is not container type")
 		}
 
@@ -178,7 +181,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationSnapshotCreate, resources, nil, snapshot, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationSnapshotCreate, resources, nil, snapshot, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -186,7 +189,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func containerSnapshotHandler(d *Daemon, r *http.Request) Response {
+func containerSnapshotHandler(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -208,7 +211,7 @@ func containerSnapshotHandler(d *Daemon, r *http.Request) Response {
 	if err != nil {
 		return SmartError(err)
 	}
-	inst, err := instanceLoadByProjectAndName(
+	inst, err := instance.InstanceLoadByProjectAndName(
 		d.State(),
 		project, containerName+
 			shared.SnapshotDelimiter+
@@ -217,7 +220,7 @@ func containerSnapshotHandler(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
-	if inst.Type() != instance.TypeContainer {
+	if inst.Type() != instancetype.Container {
 		return SmartError(fmt.Errorf("Instance not container type"))
 	}
 
@@ -237,7 +240,7 @@ func containerSnapshotHandler(d *Daemon, r *http.Request) Response {
 	}
 }
 
-func snapshotPut(d *Daemon, r *http.Request, sc container, name string) Response {
+func snapshotPut(d *Daemon, r *http.Request, sc container, name string) daemon.Response {
 	// Validate the ETag
 	etag := []interface{}{sc.ExpiryDate()}
 	err := util.EtagCheck(r, etag)
@@ -252,12 +255,12 @@ func snapshotPut(d *Daemon, r *http.Request, sc container, name string) Response
 		return InternalError(err)
 	}
 
-	var do func(op *operation) error
+	var do func(op *operation.Operation) error
 
 	_, err = rj.GetString("expires_at")
 	if err != nil {
 		// Skip updating the snapshot since the requested key wasn't provided
-		do = func(op *operation) error {
+		do = func(op *operation.Operation) error {
 			return nil
 		}
 	} else {
@@ -274,7 +277,7 @@ func snapshotPut(d *Daemon, r *http.Request, sc container, name string) Response
 		}
 
 		// Update container configuration
-		do = func(op *operation) error {
+		do = func(op *operation.Operation) error {
 			args := db.ContainerArgs{
 				Architecture: sc.Architecture(),
 				Config:       sc.LocalConfig(),
@@ -302,7 +305,7 @@ func snapshotPut(d *Daemon, r *http.Request, sc container, name string) Response
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(d.cluster, sc.Project(), operationClassTask, opType, resources, nil,
+	op, err := operation.OperationCreate(d.cluster, sc.Project(), operation.OperationClassTask, opType, resources, nil,
 		do, nil, nil)
 	if err != nil {
 		return InternalError(err)
@@ -311,7 +314,7 @@ func snapshotPut(d *Daemon, r *http.Request, sc container, name string) Response
 	return OperationResponse(op)
 }
 
-func snapshotGet(sc container, name string) Response {
+func snapshotGet(sc container, name string) daemon.Response {
 	render, _, err := sc.Render()
 	if err != nil {
 		return SmartError(err)
@@ -320,7 +323,7 @@ func snapshotGet(sc container, name string) Response {
 	return SyncResponse(true, render.(*api.InstanceSnapshot))
 }
 
-func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string) Response {
+func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string) daemon.Response {
 	body, err := ioutil.ReadAll(r.Body)
 	if err != nil {
 		return InternalError(err)
@@ -380,7 +383,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 				return InternalError(err)
 			}
 
-			op, err := operationCreate(d.cluster, sc.Project(), operationClassTask, db.OperationSnapshotTransfer, resources, nil, ws.Do, nil, nil)
+			op, err := operation.OperationCreate(d.cluster, sc.Project(), operation.OperationClassTask, db.OperationSnapshotTransfer, resources, nil, ws.Do, nil, nil)
 			if err != nil {
 				return InternalError(err)
 			}
@@ -389,7 +392,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 		}
 
 		// Pull mode
-		op, err := operationCreate(d.cluster, sc.Project(), operationClassWebsocket, db.OperationSnapshotTransfer, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+		op, err := operation.OperationCreate(d.cluster, sc.Project(), operation.OperationClassWebsocket, db.OperationSnapshotTransfer, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -415,14 +418,14 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 		return Conflict(fmt.Errorf("Name '%s' already in use", fullName))
 	}
 
-	rename := func(op *operation) error {
+	rename := func(op *operation.Operation) error {
 		return sc.Rename(fullName)
 	}
 
 	resources := map[string][]string{}
 	resources["containers"] = []string{containerName}
 
-	op, err := operationCreate(d.cluster, sc.Project(), operationClassTask, db.OperationSnapshotRename, resources, nil, rename, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, sc.Project(), operation.OperationClassTask, db.OperationSnapshotRename, resources, nil, rename, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -430,15 +433,15 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 	return OperationResponse(op)
 }
 
-func snapshotDelete(sc container, name string) Response {
-	remove := func(op *operation) error {
+func snapshotDelete(sc container, name string) daemon.Response {
+	remove := func(op *operation.Operation) error {
 		return sc.Delete()
 	}
 
 	resources := map[string][]string{}
 	resources["containers"] = []string{sc.Name()}
 
-	op, err := operationCreate(sc.DaemonState().Cluster, sc.Project(), operationClassTask, db.OperationSnapshotDelete, resources, nil, remove, nil, nil)
+	op, err := operation.OperationCreate(sc.DaemonState().Cluster, sc.Project(), operation.OperationClassTask, db.OperationSnapshotDelete, resources, nil, remove, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
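
Worth noting while reviewing the snapshot transfer hunks: the patch preserves the existing split between the two operation classes. Push mode runs the transfer server-side as a plain task, while pull mode creates a websocket operation carrying connection metadata and a Connect handler. Schematically, from the call sites above:

    // Push mode: we drive the transfer ourselves -> task operation.
    op, err := operation.OperationCreate(d.cluster, sc.Project(),
        operation.OperationClassTask, db.OperationSnapshotTransfer,
        resources, nil, ws.Do, nil, nil)

    // Pull mode: the client connects and drives -> websocket operation.
    op, err = operation.OperationCreate(d.cluster, sc.Project(),
        operation.OperationClassWebsocket, db.OperationSnapshotTransfer,
        resources, ws.Metadata(), ws.Do, nil, ws.Connect)
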
diff --git a/lxd/container_state.go b/lxd/container_state.go
index b5198977ca..acb2f136de 100644
--- a/lxd/container_state.go
+++ b/lxd/container_state.go
@@ -8,12 +8,15 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 )
 
-func containerState(d *Daemon, r *http.Request) Response {
+func containerState(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -31,7 +34,7 @@ func containerState(d *Daemon, r *http.Request) Response {
 		return response
 	}
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -43,7 +46,7 @@ func containerState(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, state)
 }
 
-func containerStatePut(d *Daemon, r *http.Request) Response {
+func containerStatePut(d *Daemon, r *http.Request) daemon.Response {
 	instanceType, err := urlInstanceTypeDetect(r)
 	if err != nil {
 		return SmartError(err)
@@ -74,17 +77,17 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 	// Don't mess with containers while in setup mode
 	<-d.readyChan
 
-	c, err := instanceLoadByProjectAndName(d.State(), project, name)
+	c, err := instance.InstanceLoadByProjectAndName(d.State(), project, name)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	var opType db.OperationType
-	var do func(*operation) error
+	var do func(*operation.Operation) error
 	switch shared.ContainerAction(raw.Action) {
 	case shared.Start:
 		opType = db.OperationContainerStart
-		do = func(op *operation) error {
+		do = func(op *operation.Operation) error {
 			c.SetOperation(op)
 			if err = c.Start(raw.Stateful); err != nil {
 				return err
@@ -94,7 +97,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 	case shared.Stop:
 		opType = db.OperationContainerStop
 		if raw.Stateful {
-			do = func(op *operation) error {
+			do = func(op *operation.Operation) error {
 				c.SetOperation(op)
 				err := c.Stop(raw.Stateful)
 				if err != nil {
@@ -104,7 +107,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 				return nil
 			}
 		} else if raw.Timeout == 0 || raw.Force {
-			do = func(op *operation) error {
+			do = func(op *operation.Operation) error {
 				c.SetOperation(op)
 				err = c.Stop(false)
 				if err != nil {
@@ -114,7 +117,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 				return nil
 			}
 		} else {
-			do = func(op *operation) error {
+			do = func(op *operation.Operation) error {
 				c.SetOperation(op)
 				if c.IsFrozen() {
 					err := c.Unfreeze()
@@ -133,7 +136,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 		}
 	case shared.Restart:
 		opType = db.OperationContainerRestart
-		do = func(op *operation) error {
+		do = func(op *operation.Operation) error {
 			c.SetOperation(op)
 			ephemeral := c.IsEphemeral()
 
@@ -192,7 +195,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 		}
 
 		opType = db.OperationContainerFreeze
-		do = func(op *operation) error {
+		do = func(op *operation.Operation) error {
 			c.SetOperation(op)
 			return c.Freeze()
 		}
@@ -202,7 +205,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 		}
 
 		opType = db.OperationContainerUnfreeze
-		do = func(op *operation) error {
+		do = func(op *operation.Operation) error {
 			c.SetOperation(op)
 			return c.Unfreeze()
 		}
@@ -213,7 +216,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, opType, resources, nil, do, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, opType, resources, nil, do, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_test.go b/lxd/container_test.go
index 966fdc4659..33ffd8382e 100644
--- a/lxd/container_test.go
+++ b/lxd/container_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/device/config"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -21,7 +22,7 @@ type containerTestSuite struct {
 
 func (suite *containerTestSuite) TestContainer_ProfilesDefault() {
 	args := db.ContainerArgs{
-		Type:      instance.TypeContainer,
+		Type:      instancetype.Container,
 		Ephemeral: false,
 		Name:      "testFoo",
 	}
@@ -63,7 +64,7 @@ func (suite *containerTestSuite) TestContainer_ProfilesMulti() {
 	}()
 
 	args := db.ContainerArgs{
-		Type:      instance.TypeContainer,
+		Type:      instancetype.Container,
 		Ephemeral: false,
 		Profiles:  []string{"default", "unprivileged"},
 		Name:      "testFoo",
@@ -86,7 +87,7 @@ func (suite *containerTestSuite) TestContainer_ProfilesMulti() {
 
 func (suite *containerTestSuite) TestContainer_ProfilesOverwriteDefaultNic() {
 	args := db.ContainerArgs{
-		Type:      instance.TypeContainer,
+		Type:      instancetype.Container,
 		Ephemeral: false,
 		Config:    map[string]string{"security.privileged": "true"},
 		Devices: config.Devices{
@@ -116,7 +117,7 @@ func (suite *containerTestSuite) TestContainer_ProfilesOverwriteDefaultNic() {
 
 func (suite *containerTestSuite) TestContainer_LoadFromDB() {
 	args := db.ContainerArgs{
-		Type:      instance.TypeContainer,
+		Type:      instancetype.Container,
 		Ephemeral: false,
 		Config:    map[string]string{"security.privileged": "true"},
 		Devices: config.Devices{
@@ -133,18 +134,12 @@ func (suite *containerTestSuite) TestContainer_LoadFromDB() {
 	defer c.Delete()
 
 	// Load the container and trigger initLXC()
-	c2, err := instanceLoadByProjectAndName(suite.d.State(), "default", "testFoo")
+	c2, err := instance.InstanceLoadByProjectAndName(suite.d.State(), "default", "testFoo")
 	c2.IsRunning()
 	suite.Req.Nil(err)
 	_, err = c2.StorageStart()
 	suite.Req.Nil(err)
 
-	// When loading from DB, we won't have a full LXC config
-	c.(*containerLXC).c = nil
-	c.(*containerLXC).cConfig = false
-	c2.(*containerLXC).c = nil
-	c2.(*containerLXC).cConfig = false
-
 	suite.Exactly(
 		c,
 		c2,
@@ -154,7 +149,7 @@ func (suite *containerTestSuite) TestContainer_LoadFromDB() {
 func (suite *containerTestSuite) TestContainer_Path_Regular() {
 	// Regular
 	args := db.ContainerArgs{
-		Type:      instance.TypeContainer,
+		Type:      instancetype.Container,
 		Ephemeral: false,
 		Name:      "testFoo",
 	}
@@ -170,7 +165,7 @@ func (suite *containerTestSuite) TestContainer_Path_Regular() {
 
 func (suite *containerTestSuite) TestContainer_LogPath() {
 	args := db.ContainerArgs{
-		Type:      instance.TypeContainer,
+		Type:      instancetype.Container,
 		Ephemeral: false,
 		Name:      "testFoo",
 	}
@@ -184,7 +179,7 @@ func (suite *containerTestSuite) TestContainer_LogPath() {
 
 func (suite *containerTestSuite) TestContainer_IsPrivileged_Privileged() {
 	args := db.ContainerArgs{
-		Type:      instance.TypeContainer,
+		Type:      instancetype.Container,
 		Ephemeral: false,
 		Config:    map[string]string{"security.privileged": "true"},
 		Name:      "testFoo",
@@ -199,7 +194,7 @@ func (suite *containerTestSuite) TestContainer_IsPrivileged_Privileged() {
 
 func (suite *containerTestSuite) TestContainer_IsPrivileged_Unprivileged() {
 	args := db.ContainerArgs{
-		Type:      instance.TypeContainer,
+		Type:      instancetype.Container,
 		Ephemeral: false,
 		Config:    map[string]string{"security.privileged": "false"},
 		Name:      "testFoo",
@@ -214,7 +209,7 @@ func (suite *containerTestSuite) TestContainer_IsPrivileged_Unprivileged() {
 
 func (suite *containerTestSuite) TestContainer_Rename() {
 	args := db.ContainerArgs{
-		Type:      instance.TypeContainer,
+		Type:      instancetype.Container,
 		Ephemeral: false,
 		Name:      "testFoo",
 	}
@@ -229,7 +224,7 @@ func (suite *containerTestSuite) TestContainer_Rename() {
 
 func (suite *containerTestSuite) TestContainer_findIdmap_isolated() {
 	c1, err := containerCreateInternal(suite.d.State(), db.ContainerArgs{
-		Type: instance.TypeContainer,
+		Type: instancetype.Container,
 		Name: "isol-1",
 		Config: map[string]string{
 			"security.idmap.isolated": "true",
@@ -239,7 +234,7 @@ func (suite *containerTestSuite) TestContainer_findIdmap_isolated() {
 	defer c1.Delete()
 
 	c2, err := containerCreateInternal(suite.d.State(), db.ContainerArgs{
-		Type: instance.TypeContainer,
+		Type: instancetype.Container,
 		Name: "isol-2",
 		Config: map[string]string{
 			"security.idmap.isolated": "true",
@@ -248,9 +243,9 @@ func (suite *containerTestSuite) TestContainer_findIdmap_isolated() {
 	suite.Req.Nil(err)
 	defer c2.Delete()
 
-	map1, err := c1.(*containerLXC).NextIdmap()
+	map1, err := c1.(*instance.ContainerLXC).NextIdmap()
 	suite.Req.Nil(err)
-	map2, err := c2.(*containerLXC).NextIdmap()
+	map2, err := c2.(*instance.ContainerLXC).NextIdmap()
 	suite.Req.Nil(err)
 
 	host := suite.d.os.IdmapSet.Idmap[0]
@@ -270,7 +265,7 @@ func (suite *containerTestSuite) TestContainer_findIdmap_isolated() {
 
 func (suite *containerTestSuite) TestContainer_findIdmap_mixed() {
 	c1, err := containerCreateInternal(suite.d.State(), db.ContainerArgs{
-		Type: instance.TypeContainer,
+		Type: instancetype.Container,
 		Name: "isol-1",
 		Config: map[string]string{
 			"security.idmap.isolated": "false",
@@ -280,7 +275,7 @@ func (suite *containerTestSuite) TestContainer_findIdmap_mixed() {
 	defer c1.Delete()
 
 	c2, err := containerCreateInternal(suite.d.State(), db.ContainerArgs{
-		Type: instance.TypeContainer,
+		Type: instancetype.Container,
 		Name: "isol-2",
 		Config: map[string]string{
 			"security.idmap.isolated": "true",
@@ -289,9 +284,9 @@ func (suite *containerTestSuite) TestContainer_findIdmap_mixed() {
 	suite.Req.Nil(err)
 	defer c2.Delete()
 
-	map1, err := c1.(*containerLXC).NextIdmap()
+	map1, err := c1.(*instance.ContainerLXC).NextIdmap()
 	suite.Req.Nil(err)
-	map2, err := c2.(*containerLXC).NextIdmap()
+	map2, err := c2.(*instance.ContainerLXC).NextIdmap()
 	suite.Req.Nil(err)
 
 	host := suite.d.os.IdmapSet.Idmap[0]
@@ -311,7 +306,7 @@ func (suite *containerTestSuite) TestContainer_findIdmap_mixed() {
 
 func (suite *containerTestSuite) TestContainer_findIdmap_raw() {
 	c1, err := containerCreateInternal(suite.d.State(), db.ContainerArgs{
-		Type: instance.TypeContainer,
+		Type: instancetype.Container,
 		Name: "isol-1",
 		Config: map[string]string{
 			"security.idmap.isolated": "false",
@@ -321,7 +316,7 @@ func (suite *containerTestSuite) TestContainer_findIdmap_raw() {
 	suite.Req.Nil(err)
 	defer c1.Delete()
 
-	map1, err := c1.(*containerLXC).NextIdmap()
+	map1, err := c1.(*instance.ContainerLXC).NextIdmap()
 	suite.Req.Nil(err)
 
 	host := suite.d.os.IdmapSet.Idmap[0]
@@ -350,7 +345,7 @@ func (suite *containerTestSuite) TestContainer_findIdmap_maxed() {
 
 	for i := 0; i < 7; i++ {
 		c, err := containerCreateInternal(suite.d.State(), db.ContainerArgs{
-			Type: instance.TypeContainer,
+			Type: instancetype.Container,
 			Name: fmt.Sprintf("isol-%d", i),
 			Config: map[string]string{
 				"security.idmap.isolated": "true",
@@ -367,7 +362,7 @@ func (suite *containerTestSuite) TestContainer_findIdmap_maxed() {
 
 		defer c.Delete()
 
-		m, err := c.(*containerLXC).NextIdmap()
+		m, err := c.(*instance.ContainerLXC).NextIdmap()
 		suite.Req.Nil(err)
 
 		maps = append(maps, m)
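
One consequence visible in the test changes: with the concrete type exported as instance.ContainerLXC, code outside the instance package reaches container-only behaviour through a type assertion on the instance.Instance interface. A hedged fragment (only compiles inside the LXD tree; the error message is mine):

    // c is an instance.Instance; NextIdmap is specific to LXC containers,
    // so assert down to the now-exported concrete type first.
    lxc, ok := c.(*instance.ContainerLXC)
    if !ok {
    	return fmt.Errorf("instance %q is not an LXC container", c.Name())
    }

    idmap, err := lxc.NextIdmap()
    if err != nil {
    	return err
    }
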
diff --git a/lxd/containers.go b/lxd/containers.go
index 10689e7001..ea089ec938 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -9,11 +9,10 @@ import (
 	"time"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
-
-	log "github.com/lxc/lxd/shared/log15"
 )
 
 var instancesCmd = APIEndpoint{
@@ -141,7 +140,7 @@ var instanceBackupExportCmd = APIEndpoint{
 	Get: APIEndpointAction{Handler: containerBackupExportGet, AccessHandler: AllowProjectPermission("containers", "view")},
 }
 
-type containerAutostartList []Instance
+type containerAutostartList []instance.Instance
 
 func (slice containerAutostartList) Len() int {
 	return len(slice)
@@ -166,12 +165,12 @@ func (slice containerAutostartList) Swap(i, j int) {
 
 func containersRestart(s *state.State) error {
 	// Get all the instances
-	result, err := instanceLoadNodeAll(s)
+	result, err := instance.InstanceLoadNodeAll(s)
 	if err != nil {
 		return err
 	}
 
-	instances := []Instance{}
+	instances := []instance.Instance{}
 
 	for _, c := range result {
 		instances = append(instances, c)
@@ -207,7 +206,7 @@ func containersRestart(s *state.State) error {
 	return nil
 }
 
-type containerStopList []Instance
+type containerStopList []instance.Instance
 
 func (slice containerStopList) Len() int {
 	return len(slice)
@@ -264,11 +263,11 @@ func containersShutdown(s *state.State) error {
 	dbAvailable := true
 
 	// Get all the instances
-	instances, err := instanceLoadNodeAll(s)
+	instances, err := instance.InstanceLoadNodeAll(s)
 	if err != nil {
 		// Mark database as offline
 		dbAvailable = false
-		instances = []Instance{}
+		instances = []instance.Instance{}
 
 		// List all containers on disk
 		cnames, err := containersOnDisk()
@@ -278,7 +277,7 @@ func containersShutdown(s *state.State) error {
 
 		for project, names := range cnames {
 			for _, name := range names {
-				c, err := containerLXCLoad(s, db.ContainerArgs{
+				c, err := instance.ContainerLXCLoad(s, db.ContainerArgs{
 					Project: project,
 					Name:    name,
 					Config:  make(map[string]string),
@@ -335,7 +334,7 @@ func containersShutdown(s *state.State) error {
 
 			// Stop the instance
 			wg.Add(1)
-			go func(c Instance, lastState string) {
+			go func(c instance.Instance, lastState string) {
 				c.Shutdown(time.Second * time.Duration(timeoutSeconds))
 				c.Stop(false)
 				c.VolatileSet(map[string]string{"volatile.last_state.power": lastState})
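
The containerAutostartList/containerStopList types above exist so a []instance.Instance can be handed to sort.Sort and ordered (presumably by boot.autostart.priority and stop priority respectively; the Less methods are outside these hunks). A runnable toy version of the same pattern:

    package main

    import (
    	"fmt"
    	"sort"
    )

    // byPriority plays the role of containerAutostartList: a named slice
    // type implementing sort.Interface so higher priorities come first.
    type byPriority []int

    func (s byPriority) Len() int           { return len(s) }
    func (s byPriority) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
    func (s byPriority) Less(i, j int) bool { return s[i] > s[j] }

    func main() {
    	p := byPriority{0, 10, 5}
    	sort.Sort(p)
    	fmt.Println(p) // [10 5 0]
    }
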
diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 2d4d147920..cd39218d0d 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -13,9 +13,11 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
@@ -24,22 +26,22 @@ import (
 
 // urlInstanceTypeDetect detects what sort of instance type filter is being requested. Either
 // explicitly via the instance-type query param or implicitly via the endpoint URL used.
-func urlInstanceTypeDetect(r *http.Request) (instance.Type, error) {
+func urlInstanceTypeDetect(r *http.Request) (instancetype.Type, error) {
 	reqInstanceType := r.URL.Query().Get("instance-type")
 	if strings.HasPrefix(mux.CurrentRoute(r).GetName(), "container") {
-		return instance.TypeContainer, nil
+		return instancetype.Container, nil
 	} else if reqInstanceType != "" {
-		instanceType, err := instance.New(reqInstanceType)
+		instanceType, err := instancetype.New(reqInstanceType)
 		if err != nil {
-			return instance.TypeAny, err
+			return instancetype.Any, err
 		}
 		return instanceType, nil
 	}
 
-	return instance.TypeAny, nil
+	return instancetype.Any, nil
 }
 
-func containersGet(d *Daemon, r *http.Request) Response {
+func containersGet(d *Daemon, r *http.Request) daemon.Response {
 	for i := 0; i < 100; i++ {
 		result, err := doContainersGet(d, r)
 		if err == nil {
@@ -104,7 +106,7 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 	}
 
 	// Get the local instances
-	nodeCts := map[string]Instance{}
+	nodeCts := map[string]instance.Instance{}
 	if recursion > 0 {
 		cts, err := instanceLoadNodeProjectAll(d.State(), project, instanceType)
 		if err != nil {
@@ -292,7 +294,7 @@ func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
 
 // Fetch information about the containers on the given remote node, using the
 // rest API and with a timeout of 30 seconds.
-func doContainersGetFromNode(project, node string, cert *shared.CertInfo, instanceType instance.Type) ([]api.Instance, error) {
+func doContainersGetFromNode(project, node string, cert *shared.CertInfo, instanceType instancetype.Type) ([]api.Instance, error) {
 	f := func() ([]api.Instance, error) {
 		client, err := cluster.Connect(node, cert, true)
 		if err != nil {
@@ -329,7 +331,7 @@ func doContainersGetFromNode(project, node string, cert *shared.CertInfo, instan
 	return containers, err
 }
 
-func doContainersFullGetFromNode(project, node string, cert *shared.CertInfo, instanceType instance.Type) ([]api.InstanceFull, error) {
+func doContainersFullGetFromNode(project, node string, cert *shared.CertInfo, instanceType instancetype.Type) ([]api.InstanceFull, error) {
 	f := func() ([]api.InstanceFull, error) {
 		client, err := cluster.Connect(node, cert, true)
 		if err != nil {
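
The urlInstanceTypeDetect change is a good example of the instance -> instancetype split: the detection rule itself is unchanged, only the package moves. As a toy stand-in for the rule (the real code validates the parameter via instancetype.New):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // detect mirrors urlInstanceTypeDetect: legacy "container*" routes force
    // the container type, an explicit instance-type query param is parsed,
    // and everything else means "any" (no filter).
    func detect(routeName, queryParam string) string {
    	if strings.HasPrefix(routeName, "container") {
    		return "container"
    	}
    	if queryParam != "" {
    		return queryParam
    	}
    	return "any"
    }

    func main() {
    	fmt.Println(detect("containerState", ""))                // container
    	fmt.Println(detect("instanceState", "virtual-machine"))  // virtual-machine
    	fmt.Println(detect("instanceState", ""))                 // any
    }
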
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 822af02534..977a2a42b8 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -18,10 +18,13 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/daemon"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/device/config"
 	"github.com/lxc/lxd/lxd/instance"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operation"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/ioprogress"
@@ -30,7 +33,7 @@ import (
 	"github.com/lxc/lxd/shared/osarch"
 )
 
-func createFromImage(d *Daemon, project string, req *api.InstancesPost) Response {
+func createFromImage(d *Daemon, project string, req *api.InstancesPost) daemon.Response {
 	var hash string
 	var err error
 
@@ -93,12 +96,12 @@ func createFromImage(d *Daemon, project string, req *api.InstancesPost) Response
 		return BadRequest(fmt.Errorf("Must specify one of alias, fingerprint or properties for init from image"))
 	}
 
-	dbType, err := instance.New(string(req.Type))
+	dbType, err := instancetype.New(string(req.Type))
 	if err != nil {
 		return BadRequest(err)
 	}
 
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		args := db.ContainerArgs{
 			Project:     project,
 			Config:      req.Config,
@@ -146,7 +149,7 @@ func createFromImage(d *Daemon, project string, req *api.InstancesPost) Response
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationContainerCreate, resources, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationContainerCreate, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -154,8 +157,8 @@ func createFromImage(d *Daemon, project string, req *api.InstancesPost) Response
 	return OperationResponse(op)
 }
 
-func createFromNone(d *Daemon, project string, req *api.InstancesPost) Response {
-	dbType, err := instance.New(string(req.Type))
+func createFromNone(d *Daemon, project string, req *api.InstancesPost) daemon.Response {
+	dbType, err := instancetype.New(string(req.Type))
 	if err != nil {
 		return BadRequest(err)
 	}
@@ -179,7 +182,7 @@ func createFromNone(d *Daemon, project string, req *api.InstancesPost) Response
 		args.Architecture = architecture
 	}
 
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		_, err := containerCreateAsEmpty(d, args)
 		return err
 	}
@@ -187,7 +190,7 @@ func createFromNone(d *Daemon, project string, req *api.InstancesPost) Response
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationContainerCreate, resources, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationContainerCreate, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -195,7 +198,7 @@ func createFromNone(d *Daemon, project string, req *api.InstancesPost) Response
 	return OperationResponse(op)
 }
 
-func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Response {
+func createFromMigration(d *Daemon, project string, req *api.InstancesPost) daemon.Response {
 	// Validate migration mode
 	if req.Source.Mode != "pull" && req.Source.Mode != "push" {
 		return NotImplemented(fmt.Errorf("Mode '%s' not implemented", req.Source.Mode))
@@ -214,7 +217,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp
 		req.Profiles = []string{"default"}
 	}
 
-	dbType, err := instance.New(string(req.Type))
+	dbType, err := instancetype.New(string(req.Type))
 	if err != nil {
 		return BadRequest(err)
 	}
@@ -285,14 +288,14 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp
 	// Early check for refresh
 	if req.Source.Refresh {
 		// Check if the container exists
-		inst, err := instanceLoadByProjectAndName(d.State(), project, req.Name)
+		inst, err := instance.InstanceLoadByProjectAndName(d.State(), project, req.Name)
 		if err != nil {
 			req.Source.Refresh = false
 		} else if inst.IsRunning() {
 			return BadRequest(fmt.Errorf("Cannot refresh a running container"))
 		}
 
-		if inst.Type() != instance.TypeContainer {
+		if inst.Type() != instancetype.Container {
 			return BadRequest(fmt.Errorf("Instance type not container"))
 		}
 
@@ -321,7 +324,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp
 			}
 		} else {
 			// Retrieve the future storage pool
-			cM, err := containerLXCLoad(d.State(), args, nil)
+			cM, err := instance.ContainerLXCLoad(d.State(), args, nil)
 			if err != nil {
 				return InternalError(err)
 			}
@@ -389,7 +392,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp
 	}
 
 	instanceOnly := req.Source.InstanceOnly || req.Source.ContainerOnly
-	migrationArgs := MigrationSinkArgs{
+	migrationArgs := instance.MigrationSinkArgs{
 		Url: req.Source.Operation,
 		Dialer: websocket.Dialer{
 			TLSClientConfig: config,
@@ -408,7 +411,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp
 		return InternalError(err)
 	}
 
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		// And finally run the migration.
 		err = sink.Do(op)
 		if err != nil {
@@ -433,14 +436,14 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name}
 
-	var op *operation
+	var op *operation.Operation
 	if push {
-		op, err = operationCreate(d.cluster, project, operationClassWebsocket, db.OperationContainerCreate, resources, sink.Metadata(), run, nil, sink.Connect)
+		op, err = operation.OperationCreate(d.cluster, project, operation.OperationClassWebsocket, db.OperationContainerCreate, resources, sink.Metadata(), run, nil, sink.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
 	} else {
-		op, err = operationCreate(d.cluster, project, operationClassTask, db.OperationContainerCreate, resources, nil, run, nil, nil)
+		op, err = operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationContainerCreate, resources, nil, run, nil, nil)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -449,7 +452,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) Resp
 	return OperationResponse(op)
 }
 
-func createFromCopy(d *Daemon, project string, req *api.InstancesPost) Response {
+func createFromCopy(d *Daemon, project string, req *api.InstancesPost) daemon.Response {
 	if req.Source.Source == "" {
 		return BadRequest(fmt.Errorf("must specify a source container"))
 	}
@@ -460,7 +463,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) Response
 	}
 	targetProject := project
 
-	source, err := instanceLoadByProjectAndName(d.State(), sourceProject, req.Source.Source)
+	source, err := instance.InstanceLoadByProjectAndName(d.State(), sourceProject, req.Source.Source)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -565,7 +568,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) Response
 	// Early check for refresh
 	if req.Source.Refresh {
 		// Check if the container exists
-		c, err := instanceLoadByProjectAndName(d.State(), targetProject, req.Name)
+		c, err := instance.InstanceLoadByProjectAndName(d.State(), targetProject, req.Name)
 		if err != nil {
 			req.Source.Refresh = false
 		} else if c.IsRunning() {
@@ -573,12 +576,12 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) Response
 		}
 	}
 
-	dbType, err := instance.New(string(req.Type))
+	dbType, err := instancetype.New(string(req.Type))
 	if err != nil {
 		return BadRequest(err)
 	}
 
-	if dbType != instance.TypeAny && dbType != source.Type() {
+	if dbType != instancetype.Any && dbType != source.Type() {
 		return BadRequest(fmt.Errorf("Instance type should not be specified or should match source type"))
 	}
 
@@ -596,7 +599,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) Response
 		Stateful:     req.Stateful,
 	}
 
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		instanceOnly := req.Source.InstanceOnly || req.Source.ContainerOnly
 		_, err := containerCreateAsCopy(d.State(), args, source, instanceOnly, req.Source.Refresh)
 		if err != nil {
@@ -608,7 +611,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) Response
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name, req.Source.Source}
 
-	op, err := operationCreate(d.cluster, targetProject, operationClassTask, db.OperationContainerCreate, resources, nil, run, nil, nil)
+	op, err := operation.OperationCreate(d.cluster, targetProject, operation.OperationClassTask, db.OperationContainerCreate, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -616,7 +619,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) Response
 	return OperationResponse(op)
 }
 
-func createFromBackup(d *Daemon, project string, data io.Reader, pool string) Response {
+func createFromBackup(d *Daemon, project string, data io.Reader, pool string) daemon.Response {
 	// Write the data to a temp file
 	f, err := ioutil.TempFile("", "lxd_backup_")
 	if err != nil {
@@ -644,7 +647,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) Re
 		bInfo.Pool = pool
 	}
 
-	run := func(op *operation) error {
+	run := func(op *operation.Operation) error {
 		defer f.Close()
 
 		// Dump tarball to storage
@@ -659,7 +662,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) Re
 			Force: true,
 		})
 		if err != nil {
-			cPool.ContainerDelete(&containerLXC{name: bInfo.Name, project: project})
+			cPool.ContainerDelete(instance.ContainerLXCInstantiateEmpty(bInfo.Name, project))
 			return errors.Wrap(err, "Marshal internal import request")
 		}
 
@@ -672,11 +675,11 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) Re
 		resp := internalImport(d, req)
 
 		if resp.String() != "success" {
-			cPool.ContainerDelete(&containerLXC{name: bInfo.Name, project: project})
+			cPool.ContainerDelete(instance.ContainerLXCInstantiateEmpty(bInfo.Name, project))
 			return fmt.Errorf("Internal import request: %v", resp.String())
 		}
 
-		c, err := instanceLoadByProjectAndName(d.State(), project, bInfo.Name)
+		c, err := instance.InstanceLoadByProjectAndName(d.State(), project, bInfo.Name)
 		if err != nil {
 			return errors.Wrap(err, "Load container")
 		}
@@ -692,7 +695,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) Re
 	resources := map[string][]string{}
 	resources["containers"] = []string{bInfo.Name}
 
-	op, err := operationCreate(d.cluster, project, operationClassTask, db.OperationBackupRestore,
+	op, err := operation.OperationCreate(d.cluster, project, operation.OperationClassTask, db.OperationBackupRestore,
 		resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
@@ -701,7 +704,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) Re
 	return OperationResponse(op)
 }
 
-func containersPost(d *Daemon, r *http.Request) Response {
+func containersPost(d *Daemon, r *http.Request) daemon.Response {
 	project := projectParam(r)
 	logger.Debugf("Responding to container create")
 
@@ -830,7 +833,7 @@ func containersPost(d *Daemon, r *http.Request) Response {
 	}
 }
 
-func containerFindStoragePool(d *Daemon, project string, req *api.InstancesPost) (string, string, string, map[string]string, Response) {
+func containerFindStoragePool(d *Daemon, project string, req *api.InstancesPost) (string, string, string, map[string]string, daemon.Response) {
 	// Grab the container's root device if one is specified
 	storagePool := ""
 	storagePoolProfile := ""
@@ -887,7 +890,7 @@ func containerFindStoragePool(d *Daemon, project string, req *api.InstancesPost)
 	return storagePool, storagePoolProfile, localRootDiskDeviceKey, localRootDiskDevice, nil
 }
 
-func clusterCopyContainerInternal(d *Daemon, source Instance, project string, req *api.InstancesPost) Response {
+func clusterCopyContainerInternal(d *Daemon, source instance.Instance, project string, req *api.InstancesPost) daemon.Response {
 	name := req.Source.Source
 
 	// Locate the source of the container

