[lxc-devel] [lxd/master] Instance driver cleanup

stgraber on Github lxc-bot at linuxcontainers.org
Mon Nov 23 21:26:41 UTC 2020


From dd693f4706f7cfe045bf6f0ddf77420b6be2d71e Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Sun, 22 Nov 2020 19:45:19 -0500
Subject: [PATCH 1/6] lxd/instance: Move id field to common

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/instance/drivers/driver_common.go | 1 +
 lxd/instance/drivers/driver_lxc.go    | 5 ++---
 lxd/instance/drivers/driver_qemu.go   | 5 ++---
 3 files changed, 5 insertions(+), 6 deletions(-)
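
The move is transparent to existing callers because Go promotes fields from
embedded structs; a minimal standalone sketch (toy types, not the actual LXD
structs) of the mechanism:

    package main

    import "fmt"

    // common holds state shared by all instance drivers.
    type common struct {
        id int
    }

    // lxc embeds common, so c.id resolves to c.common.id automatically.
    type lxc struct {
        common
        name string
    }

    func main() {
        c := lxc{common: common{id: 42}, name: "c1"}
        fmt.Println(c.id, c.name) // prints: 42 c1
    }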

diff --git a/lxd/instance/drivers/driver_common.go b/lxd/instance/drivers/driver_common.go
index f98bd1db69..7857e41319 100644
--- a/lxd/instance/drivers/driver_common.go
+++ b/lxd/instance/drivers/driver_common.go
@@ -14,6 +14,7 @@ import (
 
 // common provides structure common to all instance types.
 type common struct {
+	id              int
 	dbType          instancetype.Type
 	architecture    int
 	devPaths        []string
diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index e461782da8..e6dc02c197 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -145,6 +145,7 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 	// Create the container struct
 	c := &lxc{
 		common: common{
+			id:           args.ID,
 			dbType:       args.Type,
 			architecture: args.Architecture,
 			localConfig:  args.Config,
@@ -153,7 +154,6 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 			state:        s,
 			profiles:     args.Profiles,
 		},
-		id:           args.ID,
 		name:         args.Name,
 		node:         args.Node,
 		description:  args.Description,
@@ -369,6 +369,7 @@ func lxcUnload(c *lxc) {
 func lxcInstantiate(s *state.State, args db.InstanceArgs, expandedDevices deviceConfig.Devices) instance.Instance {
 	c := &lxc{
 		common: common{
+			id:           args.ID,
 			dbType:       args.Type,
 			architecture: args.Architecture,
 			localConfig:  args.Config,
@@ -377,7 +378,6 @@ func lxcInstantiate(s *state.State, args db.InstanceArgs, expandedDevices device
 			state:        s,
 			profiles:     args.Profiles,
 		},
-		id:           args.ID,
 		name:         args.Name,
 		description:  args.Description,
 		ephemeral:    args.Ephemeral,
@@ -419,7 +419,6 @@ type lxc struct {
 	creationDate time.Time
 	lastUsedDate time.Time
 	ephemeral    bool
-	id           int
 	name         string
 	description  string
 	stateful     bool
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 7dabdbf371..d68a5f748d 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -103,6 +103,7 @@ func qemuLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (ins
 func qemuInstantiate(s *state.State, args db.InstanceArgs, expandedDevices deviceConfig.Devices) *qemu {
 	vm := &qemu{
 		common: common{
+			id:           args.ID,
 			dbType:       args.Type,
 			architecture: args.Architecture,
 			localConfig:  args.Config,
@@ -111,7 +112,6 @@ func qemuInstantiate(s *state.State, args db.InstanceArgs, expandedDevices devic
 			state:        s,
 			profiles:     args.Profiles,
 		},
-		id:           args.ID,
 		name:         args.Name,
 		description:  args.Description,
 		ephemeral:    args.Ephemeral,
@@ -155,6 +155,7 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 	// Create the instance struct.
 	vm := &qemu{
 		common: common{
+			id:           args.ID,
 			dbType:       args.Type,
 			architecture: args.Architecture,
 			localConfig:  args.Config,
@@ -163,7 +164,6 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 			profiles:     args.Profiles,
 			project:      args.Project,
 		},
-		id:           args.ID,
 		name:         args.Name,
 		node:         args.Node,
 		description:  args.Description,
@@ -306,7 +306,6 @@ type qemu struct {
 	creationDate time.Time
 	lastUsedDate time.Time
 	ephemeral    bool
-	id           int
 	name         string
 	description  string
 	stateful     bool

From 3ef71d41b5b12aa863a26c35d340b54585d51f4b Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Mon, 23 Nov 2020 12:59:19 -0500
Subject: [PATCH 2/6] lxd/instance/common: Use 'd' as main variable

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/instance/drivers/driver_common.go | 50 +++++++++++++--------------
 1 file changed, 25 insertions(+), 25 deletions(-)
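
The rename only touches method receivers, which are local identifiers, so
call sites and behaviour are unchanged; a standalone sketch (toy type, not
the LXD struct) for reference:

    package main

    import "fmt"

    type common struct {
        project string
    }

    // Receiver named d instead of c; the name is local to the method body.
    func (d *common) Project() string {
        return d.project
    }

    func main() {
        c := &common{project: "default"}
        fmt.Println(c.Project()) // prints: default
    }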

diff --git a/lxd/instance/drivers/driver_common.go b/lxd/instance/drivers/driver_common.go
index 7857e41319..52264945a0 100644
--- a/lxd/instance/drivers/driver_common.go
+++ b/lxd/instance/drivers/driver_common.go
@@ -28,64 +28,64 @@ type common struct {
 }
 
 // Project returns instance's project.
-func (c *common) Project() string {
-	return c.project
+func (d *common) Project() string {
+	return d.project
 }
 
 // Type returns the instance's type.
-func (c *common) Type() instancetype.Type {
-	return c.dbType
+func (d *common) Type() instancetype.Type {
+	return d.dbType
 }
 
 // Architecture returns the instance's architecture.
-func (c *common) Architecture() int {
-	return c.architecture
+func (d *common) Architecture() int {
+	return d.architecture
 }
 
 // ExpandedConfig returns instance's expanded config.
-func (c *common) ExpandedConfig() map[string]string {
-	return c.expandedConfig
+func (d *common) ExpandedConfig() map[string]string {
+	return d.expandedConfig
 }
 
 // ExpandedDevices returns instance's expanded device config.
-func (c *common) ExpandedDevices() deviceConfig.Devices {
-	return c.expandedDevices
+func (d *common) ExpandedDevices() deviceConfig.Devices {
+	return d.expandedDevices
 }
 
 // LocalConfig returns the instance's local config.
-func (c *common) LocalConfig() map[string]string {
-	return c.localConfig
+func (d *common) LocalConfig() map[string]string {
+	return d.localConfig
 }
 
 // LocalDevices returns the instance's local device config.
-func (c *common) LocalDevices() deviceConfig.Devices {
-	return c.localDevices
+func (d *common) LocalDevices() deviceConfig.Devices {
+	return d.localDevices
 }
 
-func (c *common) expandConfig(profiles []api.Profile) error {
-	if profiles == nil && len(c.profiles) > 0 {
+func (d *common) expandConfig(profiles []api.Profile) error {
+	if profiles == nil && len(d.profiles) > 0 {
 		var err error
-		profiles, err = c.state.Cluster.GetProfiles(c.project, c.profiles)
+		profiles, err = d.state.Cluster.GetProfiles(d.project, d.profiles)
 		if err != nil {
 			return err
 		}
 	}
 
-	c.expandedConfig = db.ExpandInstanceConfig(c.localConfig, profiles)
+	d.expandedConfig = db.ExpandInstanceConfig(d.localConfig, profiles)
 
 	return nil
 }
 
-func (c *common) expandDevices(profiles []api.Profile) error {
-	if profiles == nil && len(c.profiles) > 0 {
+func (d *common) expandDevices(profiles []api.Profile) error {
+	if profiles == nil && len(d.profiles) > 0 {
 		var err error
-		profiles, err = c.state.Cluster.GetProfiles(c.project, c.profiles)
+		profiles, err = d.state.Cluster.GetProfiles(d.project, d.profiles)
 		if err != nil {
 			return err
 		}
 	}
 
-	c.expandedDevices = db.ExpandInstanceDevices(c.localDevices, profiles)
+	d.expandedDevices = db.ExpandInstanceDevices(d.localDevices, profiles)
 
 	return nil
 }
@@ -94,12 +94,12 @@ func (c *common) expandDevices(profiles []api.Profile) error {
 // This is function is only safe to call from within the security
 // packages as called during instance startup, the rest of the time this
 // will likely return nil.
-func (c *common) DevPaths() []string {
-	return c.devPaths
+func (d *common) DevPaths() []string {
+	return d.devPaths
 }
 
 // restart handles instance restarts.
-func (c *common) restart(inst instance.Instance, timeout time.Duration) error {
+func (d *common) restart(inst instance.Instance, timeout time.Duration) error {
 	if timeout == 0 {
 		err := inst.Stop(false)
 		if err != nil {

From b51ad28ee0501b99085f6d1e592d43a4ecff9e14 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Mon, 23 Nov 2020 14:45:06 -0500
Subject: [PATCH 3/6] lxd/instance/qemu: Rename d to dev

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/instance/drivers/driver_qemu.go | 59 ++++++++++++++---------------
 1 file changed, 29 insertions(+), 30 deletions(-)
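
Renaming the device variables to dev keeps the existing "copy the loop
variable before capturing it" pattern intact; a standalone sketch
(hypothetical device names) of why that copy matters for revert closures
under pre-Go 1.22 loop semantics:

    package main

    import "fmt"

    func main() {
        devices := []string{"eth0", "root", "gpu0"}

        var reverts []func()
        for _, entry := range devices {
            dev := entry // copy so each closure captures its own device
            reverts = append(reverts, func() { fmt.Println("reverting", dev) })
        }

        // Each revert prints its own device rather than the last loop value.
        for _, revert := range reverts {
            revert()
        }
    }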

diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index d68a5f748d..dba993416c 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -784,8 +784,8 @@ func (vm *qemu) Start(stateful bool) error {
 	devConfs := make([]*deviceConfig.RunConfig, 0, len(vm.expandedDevices))
 
 	// Setup devices in sorted order, this ensures that device mounts are added in path order.
-	for _, d := range vm.expandedDevices.Sorted() {
-		dev := d // Ensure device variable has local scope for revert.
+	for _, entry := range vm.expandedDevices.Sorted() {
+		dev := entry // Ensure device variable has local scope for revert.
 
 		// Start the device.
 		runConf, err := vm.deviceStart(dev.Name, dev.Config, false)
@@ -1180,21 +1180,21 @@ func (vm *qemu) deviceVolatileSetFunc(devName string) func(save map[string]strin
 // RegisterDevices calls the Register() function on all of the instance's devices.
 func (vm *qemu) RegisterDevices() {
 	devices := vm.ExpandedDevices()
-	for _, dev := range devices.Sorted() {
-		d, _, err := vm.deviceLoad(dev.Name, dev.Config)
+	for _, entry := range devices.Sorted() {
+		dev, _, err := vm.deviceLoad(entry.Name, entry.Config)
 		if err == device.ErrUnsupportedDevType {
 			continue
 		}
 
 		if err != nil {
-			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": vm.Name(), "device": dev.Name})
+			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": vm.Name(), "device": entry.Name})
 			continue
 		}
 
 		// Check whether device wants to register for any events.
-		err = d.Register()
+		err = dev.Register()
 		if err != nil {
-			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": vm.Name(), "device": dev.Name})
+			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": vm.Name(), "device": entry.Name})
 			continue
 		}
 	}
@@ -1226,10 +1226,10 @@ func (vm *qemu) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (de
 		configCopy = rawConfig.Clone()
 	}
 
-	d, err := device.New(vm, vm.state, deviceName, configCopy, vm.deviceVolatileGetFunc(deviceName), vm.deviceVolatileSetFunc(deviceName))
+	dev, err := device.New(vm, vm.state, deviceName, configCopy, vm.deviceVolatileGetFunc(deviceName), vm.deviceVolatileSetFunc(deviceName))
 
 	// Return device and config copy even if error occurs as caller may still use device.
-	return d, configCopy, err
+	return dev, configCopy, err
 }
 
 // deviceStart loads a new device and calls its Start() function.
@@ -1237,16 +1237,16 @@ func (vm *qemu) deviceStart(deviceName string, rawConfig deviceConfig.Device, in
 	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": vm.Project(), "instance": vm.Name()})
 	logger.Debug("Starting device")
 
-	d, _, err := vm.deviceLoad(deviceName, rawConfig)
+	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return nil, err
 	}
 
-	if instanceRunning && !d.CanHotPlug() {
+	if instanceRunning && !dev.CanHotPlug() {
 		return nil, fmt.Errorf("Device cannot be started when instance is running")
 	}
 
-	runConf, err := d.Start()
+	runConf, err := dev.Start()
 	if err != nil {
 		return nil, err
 	}
@@ -1259,7 +1259,7 @@ func (vm *qemu) deviceStop(deviceName string, rawConfig deviceConfig.Device, ins
 	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": vm.Project(), "instance": vm.Name()})
 	logger.Debug("Stopping device")
 
-	d, _, err := vm.deviceLoad(deviceName, rawConfig)
+	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
 
 	// If deviceLoad fails with unsupported device type then return.
 	if err == device.ErrUnsupportedDevType {
@@ -1271,19 +1271,18 @@ func (vm *qemu) deviceStop(deviceName string, rawConfig deviceConfig.Device, ins
 	// versions we still need to allow previously valid devices to be stopped.
 	if err != nil {
 		// If there is no device returned, then we cannot proceed, so return as error.
-		if d == nil {
+		if dev == nil {
 			return fmt.Errorf("Device stop validation failed for %q: %v", deviceName, err)
-
 		}
 
 		logger.Error("Device stop validation failed", log.Ctx{"err": err})
 	}
 
-	if instanceRunning && !d.CanHotPlug() {
+	if instanceRunning && !dev.CanHotPlug() {
 		return fmt.Errorf("Device cannot be stopped when instance is running")
 	}
 
-	runConf, err := d.Stop()
+	runConf, err := dev.Stop()
 	if err != nil {
 		return err
 	}
@@ -3094,12 +3093,12 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 			return []string{} // Device types aren't the same, so this cannot be an update.
 		}
 
-		d, err := device.New(vm, vm.state, "", newDevice, nil, nil)
+		dev, err := device.New(vm, vm.state, "", newDevice, nil, nil)
 		if err != nil {
 			return []string{} // Couldn't create Device, so this cannot be an update.
 		}
 
-		return d.UpdatableFields()
+		return dev.UpdatableFields()
 	})
 
 	if userRequested {
@@ -3381,12 +3380,12 @@ func (vm *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices dev
 
 // deviceUpdate loads a new device and calls its Update() function.
 func (vm *qemu) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, instanceRunning bool) error {
-	d, _, err := vm.deviceLoad(deviceName, rawConfig)
+	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return err
 	}
 
-	err = d.Update(oldDevices, instanceRunning)
+	err = dev.Update(oldDevices, instanceRunning)
 	if err != nil {
 		return err
 	}
@@ -3655,22 +3654,22 @@ func (vm *qemu) Delete() error {
 }
 
 func (vm *qemu) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
-	d, _, err := vm.deviceLoad(deviceName, rawConfig)
+	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return err
 	}
 
-	if instanceRunning && !d.CanHotPlug() {
+	if instanceRunning && !dev.CanHotPlug() {
 		return fmt.Errorf("Device cannot be added when instance is running")
 	}
 
-	return d.Add()
+	return dev.Add()
 }
 
 func (vm *qemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
 	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": vm.Project(), "instance": vm.Name()})
 
-	d, _, err := vm.deviceLoad(deviceName, rawConfig)
+	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
 
 	// If deviceLoad fails with unsupported device type then return.
 	if err == device.ErrUnsupportedDevType {
@@ -3682,18 +3681,18 @@ func (vm *qemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device, i
 	// versions we still need to allow previously valid devices to be stopped.
 	if err != nil {
 		// If there is no device returned, then we cannot proceed, so return as error.
-		if d == nil {
+		if dev == nil {
 			return fmt.Errorf("Device remove validation failed for %q: %v", deviceName, err)
 		}
 
 		logger.Error("Device remove validation failed", log.Ctx{"err": err})
 	}
 
-	if instanceRunning && !d.CanHotPlug() {
+	if instanceRunning && !dev.CanHotPlug() {
 		return fmt.Errorf("Device cannot be removed when instance is running")
 	}
 
-	return d.Remove()
+	return dev.Remove()
 }
 
 // Export publishes the instance.
@@ -4386,14 +4385,14 @@ func (vm *qemu) RenderState() (*api.InstanceState, error) {
 					continue
 				}
 
-				d, _, err := vm.deviceLoad(k, m)
+				dev, _, err := vm.deviceLoad(k, m)
 				if err != nil {
 					logger.Warn("Could not load device", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "device": k, "err": err})
 					continue
 				}
 
 				// Only some NIC types support fallback state mechanisms when there is no agent.
-				nic, ok := d.(device.NICState)
+				nic, ok := dev.(device.NICState)
 				if !ok {
 					continue
 				}

From 89f2886ce4ffc80a2e4be3d3c3f63180fbf8232c Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Mon, 23 Nov 2020 15:00:07 -0500
Subject: [PATCH 4/6] lxd/instance/qemu: Replace vm with d

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/instance/drivers/driver_qemu.go | 1436 +++++++++++++--------------
 1 file changed, 718 insertions(+), 718 deletions(-)
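
The qemuCreate path in this diff relies on the revert helper to undo partial
creation on error; a minimal standalone sketch (toy reverter, not the
lxd/revert package) of that Add/Fail/Success pattern:

    package main

    import "fmt"

    // reverter collects cleanup functions and runs them in reverse order
    // unless Success() is called first.
    type reverter struct {
        fns  []func()
        done bool
    }

    func (r *reverter) Add(f func()) { r.fns = append(r.fns, f) }
    func (r *reverter) Success()     { r.done = true }

    func (r *reverter) Fail() {
        if r.done {
            return
        }
        for i := len(r.fns) - 1; i >= 0; i-- {
            r.fns[i]()
        }
    }

    func create(fail bool) error {
        revert := &reverter{}
        defer revert.Fail()

        revert.Add(func() { fmt.Println("cleanup: delete instance record") })

        if fail {
            return fmt.Errorf("device add failed")
        }

        revert.Success() // keep everything that was created
        return nil
    }

    func main() {
        fmt.Println(create(true))  // cleanup runs, then the error prints
        fmt.Println(create(false)) // nothing to undo, prints <nil>
    }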

diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index dba993416c..47fc5a43da 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -81,27 +81,27 @@ var vmConsoleLock sync.Mutex
 // qemuLoad creates a Qemu instance from the supplied InstanceArgs.
 func qemuLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (instance.Instance, error) {
 	// Create the instance struct.
-	vm := qemuInstantiate(s, args, nil)
+	d := qemuInstantiate(s, args, nil)
 
 	// Expand config and devices.
-	err := vm.expandConfig(profiles)
+	err := d.expandConfig(profiles)
 	if err != nil {
 		return nil, err
 	}
 
-	err = vm.expandDevices(profiles)
+	err = d.expandDevices(profiles)
 	if err != nil {
 		return nil, err
 	}
 
-	return vm, nil
+	return d, nil
 }
 
 // qemuInstantiate creates a Qemu struct without expanding config. The expandedDevices argument is
 // used during device config validation when the devices have already been expanded and we do not
 // have access to the profiles used to do it. This can be safely passed as nil if not required.
 func qemuInstantiate(s *state.State, args db.InstanceArgs, expandedDevices deviceConfig.Devices) *qemu {
-	vm := &qemu{
+	d := &qemu{
 		common: common{
 			id:           args.ID,
 			dbType:       args.Type,
@@ -124,36 +124,36 @@ func qemuInstantiate(s *state.State, args db.InstanceArgs, expandedDevices devic
 	}
 
 	// Get the architecture name.
-	archName, err := osarch.ArchitectureName(vm.architecture)
+	archName, err := osarch.ArchitectureName(d.architecture)
 	if err == nil {
-		vm.architectureName = archName
+		d.architectureName = archName
 	}
 
 	// Cleanup the zero values.
-	if vm.expiryDate.IsZero() {
-		vm.expiryDate = time.Time{}
+	if d.expiryDate.IsZero() {
+		d.expiryDate = time.Time{}
 	}
 
-	if vm.creationDate.IsZero() {
-		vm.creationDate = time.Time{}
+	if d.creationDate.IsZero() {
+		d.creationDate = time.Time{}
 	}
 
-	if vm.lastUsedDate.IsZero() {
-		vm.lastUsedDate = time.Time{}
+	if d.lastUsedDate.IsZero() {
+		d.lastUsedDate = time.Time{}
 	}
 
 	// This is passed during expanded config validation.
 	if expandedDevices != nil {
-		vm.expandedDevices = expandedDevices
+		d.expandedDevices = expandedDevices
 	}
 
-	return vm
+	return d
 }
 
 // qemuCreate creates a new storage volume record and returns an initialised Instance.
 func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error) {
 	// Create the instance struct.
-	vm := &qemu{
+	d := &qemu{
 		common: common{
 			id:           args.ID,
 			dbType:       args.Type,
@@ -178,66 +178,66 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 	revert := revert.New()
 	defer revert.Fail()
 
-	// Use vm.Delete() in revert on error as this function doesn't just create DB records, it can also cause
+	// Use d.Delete() in revert on error as this function doesn't just create DB records, it can also cause
 	// other modifications to the host when devices are added.
-	revert.Add(func() { vm.Delete() })
+	revert.Add(func() { d.Delete() })
 
 	// Get the architecture name.
-	archName, err := osarch.ArchitectureName(vm.architecture)
+	archName, err := osarch.ArchitectureName(d.architecture)
 	if err == nil {
-		vm.architectureName = archName
+		d.architectureName = archName
 	}
 
 	// Cleanup the zero values.
-	if vm.expiryDate.IsZero() {
-		vm.expiryDate = time.Time{}
+	if d.expiryDate.IsZero() {
+		d.expiryDate = time.Time{}
 	}
 
-	if vm.creationDate.IsZero() {
-		vm.creationDate = time.Time{}
+	if d.creationDate.IsZero() {
+		d.creationDate = time.Time{}
 	}
 
-	if vm.lastUsedDate.IsZero() {
-		vm.lastUsedDate = time.Time{}
+	if d.lastUsedDate.IsZero() {
+		d.lastUsedDate = time.Time{}
 	}
 
 	ctxMap := log.Ctx{
 		"project":   args.Project,
-		"name":      vm.name,
-		"ephemeral": vm.ephemeral,
+		"name":      d.name,
+		"ephemeral": d.ephemeral,
 	}
 
 	logger.Info("Creating instance", ctxMap)
 
 	// Load the config.
-	err = vm.init()
+	err = d.init()
 	if err != nil {
 		return nil, errors.Wrap(err, "Failed to expand config")
 	}
 
 	// Validate expanded config.
-	err = instance.ValidConfig(s.OS, vm.expandedConfig, false, true)
+	err = instance.ValidConfig(s.OS, d.expandedConfig, false, true)
 	if err != nil {
 		return nil, errors.Wrap(err, "Invalid config")
 	}
 
-	err = instance.ValidDevices(s, s.Cluster, vm.Project(), vm.Type(), vm.expandedDevices, true)
+	err = instance.ValidDevices(s, s.Cluster, d.Project(), d.Type(), d.expandedDevices, true)
 	if err != nil {
 		return nil, errors.Wrap(err, "Invalid devices")
 	}
 
 	// Retrieve the container's storage pool.
 	var storageInstance instance.Instance
-	if vm.IsSnapshot() {
-		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(vm.name)
+	if d.IsSnapshot() {
+		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)
 
 		// Load the parent.
-		storageInstance, err = instance.LoadByProjectAndName(vm.state, vm.project, parentName)
+		storageInstance, err = instance.LoadByProjectAndName(d.state, d.project, parentName)
 		if err != nil {
 			return nil, errors.Wrap(err, "Invalid parent")
 		}
 	} else {
-		storageInstance = vm
+		storageInstance = d
 	}
 
 	// Retrieve the instance's storage pool.
@@ -251,39 +251,39 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 	}
 
 	// Initialize the storage pool.
-	vm.storagePool, err = storagePools.GetPoolByName(vm.state, rootDiskDevice["pool"])
+	d.storagePool, err = storagePools.GetPoolByName(d.state, rootDiskDevice["pool"])
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed loading storage pool")
 	}
 
 	// Fill default config.
 	volumeConfig := map[string]string{}
-	err = vm.storagePool.FillInstanceConfig(vm, volumeConfig)
+	err = d.storagePool.FillInstanceConfig(d, volumeConfig)
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed filling default config")
 	}
 
 	// Create a new database entry for the instance's storage volume.
-	if vm.IsSnapshot() {
-		_, err = s.Cluster.CreateStorageVolumeSnapshot(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, vm.storagePool.ID(), volumeConfig, time.Time{})
+	if d.IsSnapshot() {
+		_, err = s.Cluster.CreateStorageVolumeSnapshot(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, d.storagePool.ID(), volumeConfig, time.Time{})
 
 	} else {
-		_, err = s.Cluster.CreateStoragePoolVolume(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, vm.storagePool.ID(), volumeConfig, db.StoragePoolVolumeContentTypeBlock)
+		_, err = s.Cluster.CreateStoragePoolVolume(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, d.storagePool.ID(), volumeConfig, db.StoragePoolVolumeContentTypeBlock)
 	}
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed creating storage record")
 	}
 
-	if !vm.IsSnapshot() {
+	if !d.IsSnapshot() {
 		// Update MAAS.
-		err = vm.maasUpdate(nil)
+		err = d.maasUpdate(nil)
 		if err != nil {
 			return nil, err
 		}
 
 		// Add devices to instance.
-		for k, m := range vm.expandedDevices {
-			err = vm.deviceAdd(k, m, false)
+		for k, m := range d.expandedDevices {
+			err = d.deviceAdd(k, m, false)
 			if err != nil && err != device.ErrUnsupportedDevType {
 				return nil, errors.Wrapf(err, "Failed to add device %q", k)
 			}
@@ -291,10 +291,10 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 	}
 
 	logger.Info("Created instance", ctxMap)
-	vm.state.Events.SendLifecycle(vm.project, "virtual-machine-created", fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
+	d.state.Events.SendLifecycle(d.project, "virtual-machine-created", fmt.Sprintf("/1.0/virtual-machines/%s", d.name), nil)
 
 	revert.Success()
-	return vm, nil
+	return d, nil
 }
 
 // qemu is the QEMU virtual machine driver.
@@ -328,18 +328,18 @@ type qemu struct {
 
 // getAgentClient returns the current agent client handle. To avoid TLS setup each time this
 // function is called, the handle is cached internally in the Qemu struct.
-func (vm *qemu) getAgentClient() (*http.Client, error) {
-	if vm.agentClient != nil {
-		return vm.agentClient, nil
+func (d *qemu) getAgentClient() (*http.Client, error) {
+	if d.agentClient != nil {
+		return d.agentClient, nil
 	}
 
 	// The connection uses mutual authentication, so use the LXD server's key & cert for client.
-	agentCert, _, clientCert, clientKey, err := vm.generateAgentCert()
+	agentCert, _, clientCert, clientKey, err := d.generateAgentCert()
 	if err != nil {
 		return nil, err
 	}
 
-	agent, err := vsock.HTTPClient(vm.vsockID(), clientCert, clientKey, agentCert)
+	agent, err := vsock.HTTPClient(d.vsockID(), clientCert, clientKey, agentCert)
 	if err != nil {
 		return nil, err
 	}
@@ -349,24 +349,24 @@ func (vm *qemu) getAgentClient() (*http.Client, error) {
 
 // getStoragePool returns the current storage pool handle. To avoid a DB lookup each time this
 // function is called, the handle is cached internally in the Qemu struct.
-func (vm *qemu) getStoragePool() (storagePools.Pool, error) {
-	if vm.storagePool != nil {
-		return vm.storagePool, nil
+func (d *qemu) getStoragePool() (storagePools.Pool, error) {
+	if d.storagePool != nil {
+		return d.storagePool, nil
 	}
 
-	pool, err := storagePools.GetPoolByInstance(vm.state, vm)
+	pool, err := storagePools.GetPoolByInstance(d.state, d)
 	if err != nil {
 		return nil, err
 	}
-	vm.storagePool = pool
+	d.storagePool = pool
 
-	return vm.storagePool, nil
+	return d.storagePool, nil
 }
 
-func (vm *qemu) getMonitorEventHandler() func(event string, data map[string]interface{}) {
-	projectName := vm.Project()
-	instanceName := vm.Name()
-	state := vm.state
+func (d *qemu) getMonitorEventHandler() func(event string, data map[string]interface{}) {
+	projectName := d.Project()
+	instanceName := d.Name()
+	state := d.state
 
 	return func(event string, data map[string]interface{}) {
 		if !shared.StringInSlice(event, []string{"SHUTDOWN"}) {
@@ -396,15 +396,15 @@ func (vm *qemu) getMonitorEventHandler() func(event string, data map[string]inte
 }
 
 // mount the instance's config volume if needed.
-func (vm *qemu) mount() (*storagePools.MountInfo, error) {
+func (d *qemu) mount() (*storagePools.MountInfo, error) {
 	var pool storagePools.Pool
-	pool, err := vm.getStoragePool()
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return nil, err
 	}
 
-	if vm.IsSnapshot() {
-		mountInfo, err := pool.MountInstanceSnapshot(vm, nil)
+	if d.IsSnapshot() {
+		mountInfo, err := pool.MountInstanceSnapshot(d, nil)
 		if err != nil {
 			return nil, err
 		}
@@ -412,7 +412,7 @@ func (vm *qemu) mount() (*storagePools.MountInfo, error) {
 		return mountInfo, nil
 	}
 
-	mountInfo, err := pool.MountInstance(vm, nil)
+	mountInfo, err := pool.MountInstance(d, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -421,13 +421,13 @@ func (vm *qemu) mount() (*storagePools.MountInfo, error) {
 }
 
 // unmount the instance's config volume if needed.
-func (vm *qemu) unmount() (bool, error) {
-	pool, err := vm.getStoragePool()
+func (d *qemu) unmount() (bool, error) {
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return false, err
 	}
 
-	unmounted, err := pool.UnmountInstance(vm, nil)
+	unmounted, err := pool.UnmountInstance(d, nil)
 	if err != nil {
 		return false, err
 	}
@@ -436,18 +436,18 @@ func (vm *qemu) unmount() (bool, error) {
 }
 
 // generateAgentCert creates the necessary server key and certificate if needed.
-func (vm *qemu) generateAgentCert() (string, string, string, string, error) {
+func (d *qemu) generateAgentCert() (string, string, string, string, error) {
 	// Mount the instance's config volume if needed.
-	_, err := vm.mount()
+	_, err := d.mount()
 	if err != nil {
 		return "", "", "", "", err
 	}
-	defer vm.unmount()
+	defer d.unmount()
 
-	agentCertFile := filepath.Join(vm.Path(), "agent.crt")
-	agentKeyFile := filepath.Join(vm.Path(), "agent.key")
-	clientCertFile := filepath.Join(vm.Path(), "agent-client.crt")
-	clientKeyFile := filepath.Join(vm.Path(), "agent-client.key")
+	agentCertFile := filepath.Join(d.Path(), "agent.crt")
+	agentKeyFile := filepath.Join(d.Path(), "agent.key")
+	clientCertFile := filepath.Join(d.Path(), "agent-client.crt")
+	clientKeyFile := filepath.Join(d.Path(), "agent-client.key")
 
 	// Create server certificate.
 	err = shared.FindOrGenCert(agentCertFile, agentKeyFile, false, false)
@@ -486,9 +486,9 @@ func (vm *qemu) generateAgentCert() (string, string, string, string, error) {
 }
 
 // Freeze freezes the instance.
-func (vm *qemu) Freeze() error {
+func (d *qemu) Freeze() error {
 	// Connect to the monitor.
-	monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
+	monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler())
 	if err != nil {
 		return err
 	}
@@ -503,26 +503,26 @@ func (vm *qemu) Freeze() error {
 }
 
 // onStop is run when the instance stops.
-func (vm *qemu) onStop(target string) error {
+func (d *qemu) onStop(target string) error {
 	ctxMap := log.Ctx{
-		"project":   vm.project,
-		"name":      vm.name,
-		"ephemeral": vm.ephemeral,
+		"project":   d.project,
+		"name":      d.name,
+		"ephemeral": d.ephemeral,
 	}
 
 	// Pick up the existing stop operation lock created in Stop() function.
-	op := operationlock.Get(vm.id)
+	op := operationlock.Get(d.id)
 	if op != nil && op.Action() != "stop" {
 		return fmt.Errorf("Instance is already running a %s operation", op.Action())
 	}
 
 	// Cleanup.
-	vm.cleanupDevices()
-	os.Remove(vm.pidFilePath())
-	os.Remove(vm.monitorPath())
-	vm.unmount()
+	d.cleanupDevices()
+	os.Remove(d.pidFilePath())
+	os.Remove(d.monitorPath())
+	d.unmount()
 
-	pidPath := filepath.Join(vm.LogPath(), "virtiofsd.pid")
+	pidPath := filepath.Join(d.LogPath(), "virtiofsd.pid")
 
 	proc, err := subprocess.ImportProcess(pidPath)
 	if err == nil {
@@ -531,7 +531,7 @@ func (vm *qemu) onStop(target string) error {
 	}
 
 	// Record power state.
-	err = vm.state.Cluster.UpdateInstancePowerState(vm.id, "STOPPED")
+	err = d.state.Cluster.UpdateInstancePowerState(d.id, "STOPPED")
 	if err != nil {
 		if op != nil {
 			op.Done(err)
@@ -540,19 +540,19 @@ func (vm *qemu) onStop(target string) error {
 	}
 
 	// Unload the apparmor profile
-	err = apparmor.InstanceUnload(vm.state, vm)
+	err = apparmor.InstanceUnload(d.state, d)
 	if err != nil {
 		ctxMap["err"] = err
 		logger.Error("Failed to unload AppArmor profile", ctxMap)
 	}
 
 	if target == "reboot" {
-		err = vm.Start(false)
-		vm.state.Events.SendLifecycle(vm.project, "virtual-machine-restarted",
-			fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
-	} else if vm.ephemeral {
+		err = d.Start(false)
+		d.state.Events.SendLifecycle(d.project, "virtual-machine-restarted",
+			fmt.Sprintf("/1.0/virtual-machines/%s", d.name), nil)
+	} else if d.ephemeral {
 		// Destroy ephemeral virtual machines
-		err = vm.Delete()
+		err = d.Delete()
 	}
 	if err != nil {
 		return err
@@ -566,19 +566,19 @@ func (vm *qemu) onStop(target string) error {
 }
 
 // Shutdown shuts the instance down.
-func (vm *qemu) Shutdown(timeout time.Duration) error {
-	if !vm.IsRunning() {
+func (d *qemu) Shutdown(timeout time.Duration) error {
+	if !d.IsRunning() {
 		return fmt.Errorf("The instance is already stopped")
 	}
 
 	// Setup a new operation
-	op, err := operationlock.Create(vm.id, "stop", true, true)
+	op, err := operationlock.Create(d.id, "stop", true, true)
 	if err != nil {
 		return err
 	}
 
 	// Connect to the monitor.
-	monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
+	monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler())
 	if err != nil {
 		op.Done(err)
 		return err
@@ -623,21 +623,21 @@ func (vm *qemu) Shutdown(timeout time.Duration) error {
 
 	// Wait for onStop.
 	err = op.Wait()
-	if err != nil && vm.IsRunning() {
+	if err != nil && d.IsRunning() {
 		return err
 	}
 
 	op.Done(nil)
-	vm.state.Events.SendLifecycle(vm.project, "virtual-machine-shutdown", fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
+	d.state.Events.SendLifecycle(d.project, "virtual-machine-shutdown", fmt.Sprintf("/1.0/virtual-machines/%s", d.name), nil)
 	return nil
 }
 
 // Restart restart the instance.
-func (vm *qemu) Restart(timeout time.Duration) error {
-	return vm.common.restart(vm, timeout)
+func (d *qemu) Restart(timeout time.Duration) error {
+	return d.common.restart(d, timeout)
 }
 
-func (vm *qemu) ovmfPath() string {
+func (d *qemu) ovmfPath() string {
 	if os.Getenv("LXD_OVMF_PATH") != "" {
 		return os.Getenv("LXD_OVMF_PATH")
 	}
@@ -646,19 +646,19 @@ func (vm *qemu) ovmfPath() string {
 }
 
 // Start starts the instance.
-func (vm *qemu) Start(stateful bool) error {
+func (d *qemu) Start(stateful bool) error {
 	// Ensure the correct vhost_vsock kernel module is loaded before establishing the vsock.
 	err := util.LoadModule("vhost_vsock")
 	if err != nil {
 		return err
 	}
 
-	if vm.IsRunning() {
+	if d.IsRunning() {
 		return fmt.Errorf("The instance is already running")
 	}
 
 	// Setup a new operation
-	op, err := operationlock.Create(vm.id, "start", false, false)
+	op, err := operationlock.Create(d.id, "start", false, false)
 	if err != nil {
 		return errors.Wrap(err, "Create instance start operation")
 	}
@@ -668,10 +668,10 @@ func (vm *qemu) Start(stateful bool) error {
 	defer revert.Fail()
 
 	// Start accumulating device paths.
-	vm.devPaths = []string{}
+	d.devPaths = []string{}
 
 	// Rotate the log file.
-	logfile := vm.LogFilePath()
+	logfile := d.LogFilePath()
 	if shared.PathExists(logfile) {
 		os.Remove(logfile + ".old")
 		err := os.Rename(logfile, logfile+".old")
@@ -681,41 +681,41 @@ func (vm *qemu) Start(stateful bool) error {
 	}
 
 	// Mount the instance's config volume.
-	mountInfo, err := vm.mount()
+	mountInfo, err := d.mount()
 	if err != nil {
 		op.Done(err)
 		return err
 	}
 
-	revert.Add(func() { vm.unmount() })
+	revert.Add(func() { d.unmount() })
 
-	err = vm.generateConfigShare()
+	err = d.generateConfigShare()
 	if err != nil {
 		op.Done(err)
 		return err
 	}
 
 	// Create all needed paths.
-	err = os.MkdirAll(vm.LogPath(), 0700)
+	err = os.MkdirAll(d.LogPath(), 0700)
 	if err != nil {
 		op.Done(err)
 		return err
 	}
 
-	err = os.MkdirAll(vm.DevicesPath(), 0711)
+	err = os.MkdirAll(d.DevicesPath(), 0711)
 	if err != nil {
 		op.Done(err)
 		return err
 	}
 
-	err = os.MkdirAll(vm.ShmountsPath(), 0711)
+	err = os.MkdirAll(d.ShmountsPath(), 0711)
 	if err != nil {
 		op.Done(err)
 		return err
 	}
 
 	// Setup virtiofsd for config path.
-	sockPath := filepath.Join(vm.LogPath(), "virtio-fs.config.sock")
+	sockPath := filepath.Join(d.LogPath(), "virtio-fs.config.sock")
 
 	// Remove old socket if needed.
 	os.Remove(sockPath)
@@ -729,7 +729,7 @@ func (vm *qemu) Start(stateful bool) error {
 
 	if cmd != "" {
 		// Start the virtiofsd process in non-daemon mode.
-		proc, err := subprocess.NewProcess(cmd, []string{fmt.Sprintf("--socket-path=%s", sockPath), "-o", fmt.Sprintf("source=%s", filepath.Join(vm.Path(), "config"))}, "", "")
+		proc, err := subprocess.NewProcess(cmd, []string{fmt.Sprintf("--socket-path=%s", sockPath), "-o", fmt.Sprintf("source=%s", filepath.Join(d.Path(), "config"))}, "", "")
 		if err != nil {
 			return err
 		}
@@ -741,7 +741,7 @@ func (vm *qemu) Start(stateful bool) error {
 
 		revert.Add(func() { proc.Stop() })
 
-		pidPath := filepath.Join(vm.LogPath(), "virtiofsd.pid")
+		pidPath := filepath.Join(d.LogPath(), "virtiofsd.pid")
 
 		err = proc.Save(pidPath)
 		if err != nil {
@@ -765,30 +765,30 @@ func (vm *qemu) Start(stateful bool) error {
 	}
 
 	// Generate UUID if not present.
-	instUUID := vm.localConfig["volatile.uuid"]
+	instUUID := d.localConfig["volatile.uuid"]
 	if instUUID == "" {
 		instUUID = uuid.New()
-		vm.VolatileSet(map[string]string{"volatile.uuid": instUUID})
+		d.VolatileSet(map[string]string{"volatile.uuid": instUUID})
 	}
 
 	// Copy OVMF settings firmware to nvram file.
 	// This firmware file can be modified by the VM so it must be copied from the defaults.
-	if !shared.PathExists(vm.nvramPath()) {
-		err = vm.setupNvram()
+	if !shared.PathExists(d.nvramPath()) {
+		err = d.setupNvram()
 		if err != nil {
 			op.Done(err)
 			return err
 		}
 	}
 
-	devConfs := make([]*deviceConfig.RunConfig, 0, len(vm.expandedDevices))
+	devConfs := make([]*deviceConfig.RunConfig, 0, len(d.expandedDevices))
 
 	// Setup devices in sorted order, this ensures that device mounts are added in path order.
-	for _, entry := range vm.expandedDevices.Sorted() {
+	for _, entry := range d.expandedDevices.Sorted() {
 		dev := entry // Ensure device variable has local scope for revert.
 
 		// Start the device.
-		runConf, err := vm.deviceStart(dev.Name, dev.Config, false)
+		runConf, err := d.deviceStart(dev.Name, dev.Config, false)
 		if err != nil {
 			op.Done(err)
 			return errors.Wrapf(err, "Failed to start device %q", dev.Name)
@@ -799,7 +799,7 @@ func (vm *qemu) Start(stateful bool) error {
 		}
 
 		revert.Add(func() {
-			err := vm.deviceStop(dev.Name, dev.Config, false)
+			err := d.deviceStop(dev.Name, dev.Config, false)
 			if err != nil {
 				logger.Errorf("Failed to cleanup device %q: %v", dev.Name, err)
 			}
@@ -809,7 +809,7 @@ func (vm *qemu) Start(stateful bool) error {
 	}
 
 	// Get qemu configuration.
-	qemuBinary, qemuBus, err := vm.qemuArchConfig()
+	qemuBinary, qemuBus, err := d.qemuArchConfig()
 	if err != nil {
 		op.Done(err)
 		return err
@@ -818,7 +818,7 @@ func (vm *qemu) Start(stateful bool) error {
 	// Define a set of files to open and pass their file descriptors to qemu command.
 	fdFiles := make([]string, 0)
 
-	confFile, err := vm.generateQemuConfigFile(mountInfo, qemuBus, devConfs, &fdFiles)
+	confFile, err := d.generateQemuConfigFile(mountInfo, qemuBus, devConfs, &fdFiles)
 	if err != nil {
 		op.Done(err)
 		return err
@@ -835,7 +835,7 @@ func (vm *qemu) Start(stateful bool) error {
 		"--",
 		qemuPath,
 		"-S",
-		"-name", vm.Name(),
+		"-name", d.Name(),
 		"-uuid", instUUID,
 		"-daemonize",
 		"-cpu", "host",
@@ -846,19 +846,19 @@ func (vm *qemu) Start(stateful bool) error {
 		"-no-user-config",
 		"-sandbox", "on,obsolete=deny,elevateprivileges=allow,spawn=deny,resourcecontrol=deny",
 		"-readconfig", confFile,
-		"-pidfile", vm.pidFilePath(),
-		"-D", vm.LogFilePath(),
-		"-chroot", vm.Path(),
+		"-pidfile", d.pidFilePath(),
+		"-D", d.LogFilePath(),
+		"-chroot", d.Path(),
 	}
 
 	// SMBIOS only on x86_64 and aarch64.
-	if shared.IntInSlice(vm.architecture, []int{osarch.ARCH_64BIT_INTEL_X86, osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN}) {
+	if shared.IntInSlice(d.architecture, []int{osarch.ARCH_64BIT_INTEL_X86, osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN}) {
 		qemuCmd = append(qemuCmd, "-smbios", "type=2,manufacturer=Canonical Ltd.,product=LXD")
 	}
 
 	// Attempt to drop privileges.
-	if vm.state.OS.UnprivUser != "" {
-		qemuCmd = append(qemuCmd, "-runas", vm.state.OS.UnprivUser)
+	if d.state.OS.UnprivUser != "" {
+		qemuCmd = append(qemuCmd, "-runas", d.state.OS.UnprivUser)
 
 		// Change ownership of config directory files so they are accessible to the
 		// unprivileged qemu process so that the 9p share can work.
@@ -867,14 +867,14 @@ func (vm *qemu) Start(stateful bool) error {
 		// to the VM. In order to ensure that non-root users in the VM cannot access these
 		// files be sure to mount the 9P share in the VM with the "access=0" option to allow
 		// only root user in VM to access the mounted share.
-		err := filepath.Walk(filepath.Join(vm.Path(), "config"),
+		err := filepath.Walk(filepath.Join(d.Path(), "config"),
 			func(path string, info os.FileInfo, err error) error {
 				if err != nil {
 					op.Done(err)
 					return err
 				}
 
-				err = os.Chown(path, int(vm.state.OS.UnprivUID), -1)
+				err = os.Chown(path, int(d.state.OS.UnprivUID), -1)
 				if err != nil {
 					op.Done(err)
 					return err
@@ -889,7 +889,7 @@ func (vm *qemu) Start(stateful bool) error {
 	}
 
 	// Handle hugepages on architectures where we don't set NUMA nodes.
-	if vm.architecture != osarch.ARCH_64BIT_INTEL_X86 && shared.IsTrue(vm.expandedConfig["limits.memory.hugepages"]) {
+	if d.architecture != osarch.ARCH_64BIT_INTEL_X86 && shared.IsTrue(d.expandedConfig["limits.memory.hugepages"]) {
 		hugetlb, err := util.HugepagesPath()
 		if err != nil {
 			op.Done(err)
@@ -899,8 +899,8 @@ func (vm *qemu) Start(stateful bool) error {
 		qemuCmd = append(qemuCmd, "-mem-path", hugetlb, "-mem-prealloc")
 	}
 
-	if vm.expandedConfig["raw.qemu"] != "" {
-		fields, err := shellquote.Split(vm.expandedConfig["raw.qemu"])
+	if d.expandedConfig["raw.qemu"] != "" {
+		fields, err := shellquote.Split(d.expandedConfig["raw.qemu"])
 		if err != nil {
 			op.Done(err)
 			return err
@@ -920,19 +920,19 @@ func (vm *qemu) Start(stateful bool) error {
 	}
 
 	// Setup background process.
-	p, err := subprocess.NewProcess(vm.state.OS.ExecPath, append(forkLimitsCmd, qemuCmd...), vm.EarlyLogFilePath(), vm.EarlyLogFilePath())
+	p, err := subprocess.NewProcess(d.state.OS.ExecPath, append(forkLimitsCmd, qemuCmd...), d.EarlyLogFilePath(), d.EarlyLogFilePath())
 	if err != nil {
 		return err
 	}
 
 	// Load the AppArmor profile
-	err = apparmor.InstanceLoad(vm.state, vm)
+	err = apparmor.InstanceLoad(d.state, d)
 	if err != nil {
 		op.Done(err)
 		return err
 	}
 
-	p.SetApparmor(apparmor.InstanceProfileName(vm))
+	p.SetApparmor(apparmor.InstanceProfileName(d))
 
 	// Open any extra files and pass their file handles to qemu command.
 	files := []*os.File{}
@@ -947,7 +947,7 @@ func (vm *qemu) Start(stateful bool) error {
 		var f *os.File
 		mode := info.Mode()
 		if mode&os.ModeSocket != 0 {
-			c, err := vm.openUnixSocket(file)
+			c, err := d.openUnixSocket(file)
 			if err != nil {
 				err = errors.Wrapf(err, "Error opening socket file %q", file)
 				op.Done(err)
@@ -982,13 +982,13 @@ func (vm *qemu) Start(stateful bool) error {
 
 	_, err = p.Wait()
 	if err != nil {
-		stderr, _ := ioutil.ReadFile(vm.EarlyLogFilePath())
+		stderr, _ := ioutil.ReadFile(d.EarlyLogFilePath())
 		err = errors.Wrapf(err, "Failed to run: %s: %s", strings.Join(p.Args, " "), string(stderr))
 		op.Done(err)
 		return err
 	}
 
-	pid, err := vm.pid()
+	pid, err := d.pid()
 	if err != nil {
 		logger.Errorf(`Failed to get VM process ID "%d"`, pid)
 		return err
@@ -1008,19 +1008,19 @@ func (vm *qemu) Start(stateful bool) error {
 	})
 
 	// Start QMP monitoring.
-	monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
+	monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler())
 	if err != nil {
 		op.Done(err)
 		return err
 	}
 
 	// Apply CPU pinning.
-	cpuLimit, ok := vm.expandedConfig["limits.cpu"]
+	cpuLimit, ok := d.expandedConfig["limits.cpu"]
 	if ok && cpuLimit != "" {
 		_, err := strconv.Atoi(cpuLimit)
 		if err != nil {
 			// Expand to a set of CPU identifiers and get the pinning map.
-			_, _, _, pins, _, err := vm.cpuTopology(cpuLimit)
+			_, _, _, pins, _, err := d.cpuTopology(cpuLimit)
 			if err != nil {
 				op.Done(err)
 				return err
@@ -1060,9 +1060,9 @@ func (vm *qemu) Start(stateful bool) error {
 	}
 
 	// Database updates
-	err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		// Record current state
-		err = tx.UpdateInstancePowerState(vm.id, "RUNNING")
+		err = tx.UpdateInstancePowerState(d.id, "RUNNING")
 		if err != nil {
 			err = errors.Wrap(err, "Error updating instance state")
 			op.Done(err)
@@ -1070,7 +1070,7 @@ func (vm *qemu) Start(stateful bool) error {
 		}
 
 		// Update time instance last started time
-		err = tx.UpdateInstanceLastUsedDate(vm.id, time.Now().UTC())
+		err = tx.UpdateInstanceLastUsedDate(d.id, time.Now().UTC())
 		if err != nil {
 			err = errors.Wrap(err, "Error updating instance last used")
 			op.Done(err)
@@ -1085,12 +1085,12 @@ func (vm *qemu) Start(stateful bool) error {
 	}
 
 	revert.Success()
-	vm.state.Events.SendLifecycle(vm.project, "virtual-machine-started", fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
+	d.state.Events.SendLifecycle(d.project, "virtual-machine-started", fmt.Sprintf("/1.0/virtual-machines/%s", d.name), nil)
 	return nil
 }
 
 // openUnixSocket connects to a UNIX socket and returns the connection.
-func (vm *qemu) openUnixSocket(sockPath string) (*net.UnixConn, error) {
+func (d *qemu) openUnixSocket(sockPath string) (*net.UnixConn, error) {
 	addr, err := net.ResolveUnixAddr("unix", sockPath)
 	if err != nil {
 		return nil, err
@@ -1104,30 +1104,30 @@ func (vm *qemu) openUnixSocket(sockPath string) (*net.UnixConn, error) {
 	return c, nil
 }
 
-func (vm *qemu) setupNvram() error {
+func (d *qemu) setupNvram() error {
 	// UEFI only on x86_64 and aarch64.
-	if !shared.IntInSlice(vm.architecture, []int{osarch.ARCH_64BIT_INTEL_X86, osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN}) {
+	if !shared.IntInSlice(d.architecture, []int{osarch.ARCH_64BIT_INTEL_X86, osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN}) {
 		return nil
 	}
 
 	// Mount the instance's config volume.
-	_, err := vm.mount()
+	_, err := d.mount()
 	if err != nil {
 		return err
 	}
-	defer vm.unmount()
+	defer d.unmount()
 
-	srcOvmfFile := filepath.Join(vm.ovmfPath(), "OVMF_VARS.fd")
-	if vm.expandedConfig["security.secureboot"] == "" || shared.IsTrue(vm.expandedConfig["security.secureboot"]) {
-		srcOvmfFile = filepath.Join(vm.ovmfPath(), "OVMF_VARS.ms.fd")
+	srcOvmfFile := filepath.Join(d.ovmfPath(), "OVMF_VARS.fd")
+	if d.expandedConfig["security.secureboot"] == "" || shared.IsTrue(d.expandedConfig["security.secureboot"]) {
+		srcOvmfFile = filepath.Join(d.ovmfPath(), "OVMF_VARS.ms.fd")
 	}
 
 	if !shared.PathExists(srcOvmfFile) {
 		return fmt.Errorf("Required EFI firmware settings file missing: %s", srcOvmfFile)
 	}
 
-	os.Remove(vm.nvramPath())
-	err = shared.FileCopy(srcOvmfFile, vm.nvramPath())
+	os.Remove(d.nvramPath())
+	err = shared.FileCopy(srcOvmfFile, d.nvramPath())
 	if err != nil {
 		return err
 	}
@@ -1135,14 +1135,14 @@ func (vm *qemu) setupNvram() error {
 	return nil
 }
 
-func (vm *qemu) qemuArchConfig() (string, string, error) {
-	if vm.architecture == osarch.ARCH_64BIT_INTEL_X86 {
+func (d *qemu) qemuArchConfig() (string, string, error) {
+	if d.architecture == osarch.ARCH_64BIT_INTEL_X86 {
 		return "qemu-system-x86_64", "pcie", nil
-	} else if vm.architecture == osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN {
+	} else if d.architecture == osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN {
 		return "qemu-system-aarch64", "pcie", nil
-	} else if vm.architecture == osarch.ARCH_64BIT_POWERPC_LITTLE_ENDIAN {
+	} else if d.architecture == osarch.ARCH_64BIT_POWERPC_LITTLE_ENDIAN {
 		return "qemu-system-ppc64", "pci", nil
-	} else if vm.architecture == osarch.ARCH_64BIT_S390_BIG_ENDIAN {
+	} else if d.architecture == osarch.ARCH_64BIT_S390_BIG_ENDIAN {
 		return "qemu-system-s390x", "ccw", nil
 	}
 
@@ -1151,11 +1151,11 @@ func (vm *qemu) qemuArchConfig() (string, string, error) {
 
 // deviceVolatileGetFunc returns a function that retrieves a named device's volatile config and
 // removes its device prefix from the keys.
-func (vm *qemu) deviceVolatileGetFunc(devName string) func() map[string]string {
+func (d *qemu) deviceVolatileGetFunc(devName string) func() map[string]string {
 	return func() map[string]string {
 		volatile := make(map[string]string)
 		prefix := fmt.Sprintf("volatile.%s.", devName)
-		for k, v := range vm.localConfig {
+		for k, v := range d.localConfig {
 			if strings.HasPrefix(k, prefix) {
 				volatile[strings.TrimPrefix(k, prefix)] = v
 			}
@@ -1166,58 +1166,58 @@ func (vm *qemu) deviceVolatileGetFunc(devName string) func() map[string]string {
 
 // deviceVolatileSetFunc returns a function that can be called to save a named device's volatile
 // config using keys that do not have the device's name prefixed.
-func (vm *qemu) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
+func (d *qemu) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
 	return func(save map[string]string) error {
 		volatileSave := make(map[string]string)
 		for k, v := range save {
 			volatileSave[fmt.Sprintf("volatile.%s.%s", devName, k)] = v
 		}
 
-		return vm.VolatileSet(volatileSave)
+		return d.VolatileSet(volatileSave)
 	}
 }
 
 // RegisterDevices calls the Register() function on all of the instance's devices.
-func (vm *qemu) RegisterDevices() {
-	devices := vm.ExpandedDevices()
+func (d *qemu) RegisterDevices() {
+	devices := d.ExpandedDevices()
 	for _, entry := range devices.Sorted() {
-		dev, _, err := vm.deviceLoad(entry.Name, entry.Config)
+		dev, _, err := d.deviceLoad(entry.Name, entry.Config)
 		if err == device.ErrUnsupportedDevType {
 			continue
 		}
 
 		if err != nil {
-			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": vm.Name(), "device": entry.Name})
+			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
 			continue
 		}
 
 		// Check whether device wants to register for any events.
 		err = dev.Register()
 		if err != nil {
-			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": vm.Name(), "device": entry.Name})
+			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
 			continue
 		}
 	}
 }
 
 // SaveConfigFile is not used by VMs.
-func (vm *qemu) SaveConfigFile() error {
+func (d *qemu) SaveConfigFile() error {
 	return instance.ErrNotImplemented
 }
 
 // OnHook is the top-level hook handler.
-func (vm *qemu) OnHook(hookName string, args map[string]string) error {
+func (d *qemu) OnHook(hookName string, args map[string]string) error {
 	return instance.ErrNotImplemented
 }
 
 // deviceLoad instantiates and validates a new device and returns it along with enriched config.
-func (vm *qemu) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
+func (d *qemu) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
 	var configCopy deviceConfig.Device
 	var err error
 
 	// Create copy of config and load some fields from volatile if device is nic or infiniband.
 	if shared.StringInSlice(rawConfig["type"], []string{"nic", "infiniband"}) {
-		configCopy, err = vm.FillNetworkDevice(deviceName, rawConfig)
+		configCopy, err = d.FillNetworkDevice(deviceName, rawConfig)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -1226,18 +1226,18 @@ func (vm *qemu) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (de
 		configCopy = rawConfig.Clone()
 	}
 
-	dev, err := device.New(vm, vm.state, deviceName, configCopy, vm.deviceVolatileGetFunc(deviceName), vm.deviceVolatileSetFunc(deviceName))
+	dev, err := device.New(d, d.state, deviceName, configCopy, d.deviceVolatileGetFunc(deviceName), d.deviceVolatileSetFunc(deviceName))
 
 	// Return device and config copy even if error occurs as caller may still use device.
 	return dev, configCopy, err
 }
 
 // deviceStart loads a new device and calls its Start() function.
-func (vm *qemu) deviceStart(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) (*deviceConfig.RunConfig, error) {
-	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": vm.Project(), "instance": vm.Name()})
+func (d *qemu) deviceStart(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) (*deviceConfig.RunConfig, error) {
+	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
 	logger.Debug("Starting device")
 
-	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
+	dev, _, err := d.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -1255,11 +1255,11 @@ func (vm *qemu) deviceStart(deviceName string, rawConfig deviceConfig.Device, in
 }
 
 // deviceStop loads a new device and calls its Stop() function.
-func (vm *qemu) deviceStop(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
-	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": vm.Project(), "instance": vm.Name()})
+func (d *qemu) deviceStop(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
+	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
 	logger.Debug("Stopping device")
 
-	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
+	dev, _, err := d.deviceLoad(deviceName, rawConfig)
 
 	// If deviceLoad fails with unsupported device type then return.
 	if err == device.ErrUnsupportedDevType {
@@ -1289,7 +1289,7 @@ func (vm *qemu) deviceStop(deviceName string, rawConfig deviceConfig.Device, ins
 
 	if runConf != nil {
 		// Run post stop hooks irrespective of run state of instance.
-		err = vm.runHooks(runConf.PostHooks)
+		err = d.runHooks(runConf.PostHooks)
 		if err != nil {
 			return err
 		}
@@ -1299,7 +1299,7 @@ func (vm *qemu) deviceStop(deviceName string, rawConfig deviceConfig.Device, ins
 }
 
 // runHooks executes the callback functions returned from a function.
-func (vm *qemu) runHooks(hooks []func() error) error {
+func (d *qemu) runHooks(hooks []func() error) error {
 	// Run any post start hooks.
 	if len(hooks) > 0 {
 		for _, hook := range hooks {
@@ -1313,24 +1313,24 @@ func (vm *qemu) runHooks(hooks []func() error) error {
 	return nil
 }
 
-func (vm *qemu) monitorPath() string {
-	return filepath.Join(vm.LogPath(), "qemu.monitor")
+func (d *qemu) monitorPath() string {
+	return filepath.Join(d.LogPath(), "qemu.monitor")
 }
 
-func (vm *qemu) nvramPath() string {
-	return filepath.Join(vm.Path(), "qemu.nvram")
+func (d *qemu) nvramPath() string {
+	return filepath.Join(d.Path(), "qemu.nvram")
 }
 
-func (vm *qemu) spicePath() string {
-	return filepath.Join(vm.LogPath(), "qemu.spice")
+func (d *qemu) spicePath() string {
+	return filepath.Join(d.LogPath(), "qemu.spice")
 }
 
 // generateConfigShare generates the config share directory that will be exported to the VM via
 // a 9P share. Due to the unknown size of templates inside the images this directory is created
 // inside the VM's config volume so that it can be restricted by quota.
 // Requires the instance be mounted before calling this function.
-func (vm *qemu) generateConfigShare() error {
-	configDrivePath := filepath.Join(vm.Path(), "config")
+func (d *qemu) generateConfigShare() error {
+	configDrivePath := filepath.Join(d.Path(), "config")
 
 	// Create config drive dir.
 	os.RemoveAll(configDrivePath)
@@ -1345,8 +1345,8 @@ func (vm *qemu) generateConfigShare() error {
 		return err
 	}
 
-	if vm.ExpandedConfig()["user.user-data"] != "" {
-		err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "user-data"), []byte(vm.ExpandedConfig()["user.user-data"]), 0400)
+	if d.ExpandedConfig()["user.user-data"] != "" {
+		err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "user-data"), []byte(d.ExpandedConfig()["user.user-data"]), 0400)
 		if err != nil {
 			return err
 		}
@@ -1357,8 +1357,8 @@ func (vm *qemu) generateConfigShare() error {
 		}
 	}
 
-	if vm.ExpandedConfig()["user.vendor-data"] != "" {
-		err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "vendor-data"), []byte(vm.ExpandedConfig()["user.vendor-data"]), 0400)
+	if d.ExpandedConfig()["user.vendor-data"] != "" {
+		err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "vendor-data"), []byte(d.ExpandedConfig()["user.vendor-data"]), 0400)
 		if err != nil {
 			return err
 		}
@@ -1369,8 +1369,8 @@ func (vm *qemu) generateConfigShare() error {
 		}
 	}
 
-	if vm.ExpandedConfig()["user.network-config"] != "" {
-		err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "network-config"), []byte(vm.ExpandedConfig()["user.network-config"]), 0400)
+	if d.ExpandedConfig()["user.network-config"] != "" {
+		err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "network-config"), []byte(d.ExpandedConfig()["user.network-config"]), 0400)
 		if err != nil {
 			return err
 		}
@@ -1379,7 +1379,7 @@ func (vm *qemu) generateConfigShare() error {
 	}
 
 	// Append any user.meta-data to our predefined meta-data config.
-	err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "meta-data"), []byte(fmt.Sprintf("instance-id: %s\nlocal-hostname: %s\n%s\n", vm.Name(), vm.Name(), vm.ExpandedConfig()["user.meta-data"])), 0400)
+	err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "meta-data"), []byte(fmt.Sprintf("instance-id: %s\nlocal-hostname: %s\n%s\n", d.Name(), d.Name(), d.ExpandedConfig()["user.meta-data"])), 0400)
 	if err != nil {
 		return err
 	}
@@ -1406,7 +1406,7 @@ func (vm *qemu) generateConfigShare() error {
 		}
 	}
 
-	agentCert, agentKey, clientCert, _, err := vm.generateAgentCert()
+	agentCert, agentKey, clientCert, _, err := d.generateAgentCert()
 	if err != nil {
 		return err
 	}
@@ -1554,7 +1554,7 @@ echo "To start it now, unmount this filesystem and run: systemctl start lxd-agen
 	}
 
 	// Instance data for devlxd.
-	err = vm.writeInstanceData()
+	err = d.writeInstanceData()
 	if err != nil {
 		return err
 	}
@@ -1567,27 +1567,27 @@ echo "To start it now, unmount this filesystem and run: systemctl start lxd-agen
 
 	// Template anything that needs templating.
 	key := "volatile.apply_template"
-	if vm.localConfig[key] != "" {
+	if d.localConfig[key] != "" {
 		// Run any template that needs running.
-		err = vm.templateApplyNow(vm.localConfig[key], filepath.Join(configDrivePath, "files"))
+		err = d.templateApplyNow(d.localConfig[key], filepath.Join(configDrivePath, "files"))
 		if err != nil {
 			return err
 		}
 
 		// Remove the volatile key from the DB.
-		err := vm.state.Cluster.DeleteInstanceConfigKey(vm.id, key)
+		err := d.state.Cluster.DeleteInstanceConfigKey(d.id, key)
 		if err != nil {
 			return err
 		}
 	}
 
-	err = vm.templateApplyNow("start", filepath.Join(configDrivePath, "files"))
+	err = d.templateApplyNow("start", filepath.Join(configDrivePath, "files"))
 	if err != nil {
 		return err
 	}
 
 	// Copy the template metadata itself too.
-	metaPath := filepath.Join(vm.Path(), "metadata.yaml")
+	metaPath := filepath.Join(d.Path(), "metadata.yaml")
 	if shared.PathExists(metaPath) {
 		err = shared.FileCopy(metaPath, filepath.Join(configDrivePath, "files/metadata.yaml"))
 		if err != nil {
@@ -1598,9 +1598,9 @@ echo "To start it now, unmount this filesystem and run: systemctl start lxd-agen
 	return nil
 }
 
-func (vm *qemu) templateApplyNow(trigger string, path string) error {
+func (d *qemu) templateApplyNow(trigger string, path string) error {
 	// If there's no metadata, just return.
-	fname := filepath.Join(vm.Path(), "metadata.yaml")
+	fname := filepath.Join(d.Path(), "metadata.yaml")
 	if !shared.PathExists(fname) {
 		return nil
 	}
@@ -1618,9 +1618,9 @@ func (vm *qemu) templateApplyNow(trigger string, path string) error {
 	}
 
 	// Figure out the instance architecture.
-	arch, err := osarch.ArchitectureName(vm.architecture)
+	arch, err := osarch.ArchitectureName(d.architecture)
 	if err != nil {
-		arch, err = osarch.ArchitectureName(vm.state.OS.Architectures[0])
+		arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
 		if err != nil {
 			return errors.Wrap(err, "Failed to detect system architecture")
 		}
@@ -1628,11 +1628,11 @@ func (vm *qemu) templateApplyNow(trigger string, path string) error {
 
 	// Generate the container metadata.
 	instanceMeta := make(map[string]string)
-	instanceMeta["name"] = vm.name
+	instanceMeta["name"] = d.name
 	instanceMeta["type"] = "virtual-machine"
 	instanceMeta["architecture"] = arch
 
-	if vm.ephemeral {
+	if d.ephemeral {
 		instanceMeta["ephemeral"] = "true"
 	} else {
 		instanceMeta["ephemeral"] = "false"
@@ -1667,20 +1667,20 @@ func (vm *qemu) templateApplyNow(trigger string, path string) error {
 			defer w.Close()
 
 			// Read the template.
-			tplString, err := ioutil.ReadFile(filepath.Join(vm.TemplatesPath(), tpl.Template))
+			tplString, err := ioutil.ReadFile(filepath.Join(d.TemplatesPath(), tpl.Template))
 			if err != nil {
 				return errors.Wrap(err, "Failed to read template file")
 			}
 
 			// Restrict filesystem access to within the container's rootfs.
-			tplSet := pongo2.NewSet(fmt.Sprintf("%s-%s", vm.name, tpl.Template), pongoTemplate.ChrootLoader{Path: vm.TemplatesPath()})
+			tplSet := pongo2.NewSet(fmt.Sprintf("%s-%s", d.name, tpl.Template), pongoTemplate.ChrootLoader{Path: d.TemplatesPath()})
 			tplRender, err := tplSet.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
 			if err != nil {
 				return errors.Wrap(err, "Failed to render template")
 			}
 
 			configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
-				val, ok := vm.expandedConfig[confKey.String()]
+				val, ok := d.expandedConfig[confKey.String()]
 				if !ok {
 					return confDefault
 				}
@@ -1693,8 +1693,8 @@ func (vm *qemu) templateApplyNow(trigger string, path string) error {
 				"path":       tplPath,
 				"instance":   instanceMeta,
 				"container":  instanceMeta, // FIXME: remove once most images have moved away.
-				"config":     vm.expandedConfig,
-				"devices":    vm.expandedDevices,
+				"config":     d.expandedConfig,
+				"devices":    d.expandedDevices,
 				"properties": tpl.Properties,
 				"config_get": configGet}, w)
 
@@ -1710,7 +1710,7 @@ func (vm *qemu) templateApplyNow(trigger string, path string) error {
 
 // deviceBootPriorities returns a map keyed on device name containing the boot index to use.
 // Qemu tries to boot devices in order of boot index (lowest first).
-func (vm *qemu) deviceBootPriorities() (map[string]int, error) {
+func (d *qemu) deviceBootPriorities() (map[string]int, error) {
 	type devicePrios struct {
 		Name     string
 		BootPrio uint32
@@ -1718,7 +1718,7 @@ func (vm *qemu) deviceBootPriorities() (map[string]int, error) {
 
 	devices := []devicePrios{}
 
-	for devName, devConf := range vm.expandedDevices {
+	for devName, devConf := range d.expandedDevices {
 		if devConf["type"] != "disk" && devConf["type"] != "nic" {
 			continue
 		}
@@ -1749,33 +1749,33 @@ func (vm *qemu) deviceBootPriorities() (map[string]int, error) {
 
 // generateQemuConfigFile writes the qemu config file and returns its location.
 // It writes the config file inside the VM's log path.
-func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busName string, devConfs []*deviceConfig.RunConfig, fdFiles *[]string) (string, error) {
+func (d *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busName string, devConfs []*deviceConfig.RunConfig, fdFiles *[]string) (string, error) {
 	var sb *strings.Builder = &strings.Builder{}
 
 	err := qemuBase.Execute(sb, map[string]interface{}{
-		"architecture": vm.architectureName,
-		"spicePath":    vm.spicePath(),
+		"architecture": d.architectureName,
+		"spicePath":    d.spicePath(),
 	})
 	if err != nil {
 		return "", err
 	}
 
-	err = vm.addCPUMemoryConfig(sb)
+	err = d.addCPUMemoryConfig(sb)
 	if err != nil {
 		return "", err
 	}
 
 	err = qemuDriveFirmware.Execute(sb, map[string]interface{}{
-		"architecture": vm.architectureName,
-		"roPath":       filepath.Join(vm.ovmfPath(), "OVMF_CODE.fd"),
-		"nvramPath":    vm.nvramPath(),
+		"architecture": d.architectureName,
+		"roPath":       filepath.Join(d.ovmfPath(), "OVMF_CODE.fd"),
+		"nvramPath":    d.nvramPath(),
 	})
 	if err != nil {
 		return "", err
 	}
 
 	err = qemuControlSocket.Execute(sb, map[string]interface{}{
-		"path": vm.monitorPath(),
+		"path": d.monitorPath(),
 	})
 	if err != nil {
 		return "", err
@@ -1842,7 +1842,7 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 		"devAddr":       devAddr,
 		"multifunction": multi,
 
-		"vsockID": vm.vsockID(),
+		"vsockID": d.vsockID(),
 	})
 	if err != nil {
 		return "", err
@@ -1863,7 +1863,7 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 	}
 
 	// s390x doesn't really have USB.
-	if vm.architecture != osarch.ARCH_64BIT_S390_BIG_ENDIAN {
+	if d.architecture != osarch.ARCH_64BIT_S390_BIG_ENDIAN {
 		devBus, devAddr, multi = bus.allocate(busFunctionGroupGeneric)
 		err = qemuUSB.Execute(sb, map[string]interface{}{
 			"bus":           bus.name,
@@ -1895,13 +1895,13 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 		"multifunction": multi,
 		"protocol":      "9p",
 
-		"path": filepath.Join(vm.Path(), "config"),
+		"path": filepath.Join(d.Path(), "config"),
 	})
 	if err != nil {
 		return "", err
 	}
 
-	sockPath := filepath.Join(vm.LogPath(), "virtio-fs.config.sock")
+	sockPath := filepath.Join(d.LogPath(), "virtio-fs.config.sock")
 	if shared.PathExists(sockPath) {
 		devBus, devAddr, multi = bus.allocate(busFunctionGroup9p)
 		err = qemuDriveConfig.Execute(sb, map[string]interface{}{
@@ -1925,14 +1925,14 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 		"devAddr":       devAddr,
 		"multifunction": multi,
 
-		"architecture": vm.architectureName,
+		"architecture": d.architectureName,
 	})
 	if err != nil {
 		return "", err
 	}
 
 	// Dynamic devices.
-	bootIndexes, err := vm.deviceBootPriorities()
+	bootIndexes, err := d.deviceBootPriorities()
 	if err != nil {
 		return "", errors.Wrap(err, "Error calculating boot indexes")
 	}
@@ -1949,11 +1949,11 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 		if len(runConf.Mounts) > 0 {
 			for _, drive := range runConf.Mounts {
 				if drive.TargetPath == "/" {
-					err = vm.addRootDriveConfig(sb, mountInfo, bootIndexes, drive)
+					err = d.addRootDriveConfig(sb, mountInfo, bootIndexes, drive)
 				} else if drive.FSType == "9p" {
-					err = vm.addDriveDirConfig(sb, bus, fdFiles, &agentMounts, drive)
+					err = d.addDriveDirConfig(sb, bus, fdFiles, &agentMounts, drive)
 				} else {
-					err = vm.addDriveConfig(sb, bootIndexes, drive)
+					err = d.addDriveConfig(sb, bootIndexes, drive)
 				}
 				if err != nil {
 					return "", err
@@ -1963,7 +1963,7 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 
 		// Add network device.
 		if len(runConf.NetworkInterface) > 0 {
-			err = vm.addNetDevConfig(sb, bus, bootIndexes, runConf.NetworkInterface, fdFiles)
+			err = d.addNetDevConfig(sb, bus, bootIndexes, runConf.NetworkInterface, fdFiles)
 			if err != nil {
 				return "", err
 			}
@@ -1971,7 +1971,7 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 
 		// Add GPU device.
 		if len(runConf.GPUDevice) > 0 {
-			err = vm.addGPUDevConfig(sb, bus, runConf.GPUDevice)
+			err = d.addGPUDevConfig(sb, bus, runConf.GPUDevice)
 			if err != nil {
 				return "", err
 			}
@@ -1979,7 +1979,7 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 
 		// Add USB device.
 		if len(runConf.USBDevice) > 0 {
-			err = vm.addUSBDeviceConfig(sb, bus, runConf.USBDevice)
+			err = d.addUSBDeviceConfig(sb, bus, runConf.USBDevice)
 			if err != nil {
 				return "", err
 			}
@@ -1987,7 +1987,7 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 
 		// Add TPM device.
 		if len(runConf.TPMDevice) > 0 {
-			err = vm.addTPMDeviceConfig(sb, runConf.TPMDevice)
+			err = d.addTPMDeviceConfig(sb, runConf.TPMDevice)
 			if err != nil {
 				return "", err
 			}
@@ -2001,27 +2001,27 @@ func (vm *qemu) generateQemuConfigFile(mountInfo *storagePools.MountInfo, busNam
 		return "", errors.Wrapf(err, "Failed marshalling agent mounts to JSON")
 	}
 
-	agentMountFile := filepath.Join(vm.Path(), "config", "agent-mounts.json")
+	agentMountFile := filepath.Join(d.Path(), "config", "agent-mounts.json")
 	err = ioutil.WriteFile(agentMountFile, agentMountJSON, 0400)
 	if err != nil {
 		return "", errors.Wrapf(err, "Failed writing agent mounts file")
 	}
 
 	// Write the config file to disk.
-	configPath := filepath.Join(vm.LogPath(), "qemu.conf")
+	configPath := filepath.Join(d.LogPath(), "qemu.conf")
 	return configPath, ioutil.WriteFile(configPath, []byte(sb.String()), 0640)
 }
 
 // addCPUMemoryConfig adds the qemu config required for setting the number of virtualised CPUs and memory.
-func (vm *qemu) addCPUMemoryConfig(sb *strings.Builder) error {
+func (d *qemu) addCPUMemoryConfig(sb *strings.Builder) error {
 	// Default to a single core.
-	cpus := vm.expandedConfig["limits.cpu"]
+	cpus := d.expandedConfig["limits.cpu"]
 	if cpus == "" {
 		cpus = "1"
 	}
 
 	ctx := map[string]interface{}{
-		"architecture": vm.architectureName,
+		"architecture": d.architectureName,
 	}
 
 	cpuCount, err := strconv.Atoi(cpus)
@@ -2035,7 +2035,7 @@ func (vm *qemu) addCPUMemoryConfig(sb *strings.Builder) error {
 		hostNodes = []uint64{0}
 	} else {
 		// Expand to a set of CPU identifiers and get the pinning map.
-		nrSockets, nrCores, nrThreads, vcpus, numaNodes, err := vm.cpuTopology(cpus)
+		nrSockets, nrCores, nrThreads, vcpus, numaNodes, err := d.cpuTopology(cpus)
 		if err != nil {
 			return err
 		}
@@ -2087,7 +2087,7 @@ func (vm *qemu) addCPUMemoryConfig(sb *strings.Builder) error {
 	}
 
 	// Configure memory limit.
-	memSize := vm.expandedConfig["limits.memory"]
+	memSize := d.expandedConfig["limits.memory"]
 	if memSize == "" {
 		memSize = qemuDefaultMemSize // Default if no memory limit specified.
 	}
@@ -2098,7 +2098,7 @@ func (vm *qemu) addCPUMemoryConfig(sb *strings.Builder) error {
 	}
 
 	ctx["hugepages"] = ""
-	if shared.IsTrue(vm.expandedConfig["limits.memory.hugepages"]) {
+	if shared.IsTrue(d.expandedConfig["limits.memory.hugepages"]) {
 		hugetlb, err := util.HugepagesPath()
 		if err != nil {
 			return err
@@ -2114,7 +2114,7 @@ func (vm *qemu) addCPUMemoryConfig(sb *strings.Builder) error {
 	ctx["memory"] = nodeMemory
 
 	err = qemuMemory.Execute(sb, map[string]interface{}{
-		"architecture": vm.architectureName,
+		"architecture": d.architectureName,
 		"memSizeBytes": memSizeBytes,
 	})
 	if err != nil {
@@ -2127,19 +2127,19 @@ func (vm *qemu) addCPUMemoryConfig(sb *strings.Builder) error {
 
 // addFileDescriptor adds a file path to the list of files to open and pass file descriptor to qemu.
 // Returns the file descriptor number that qemu will receive.
-func (vm *qemu) addFileDescriptor(fdFiles *[]string, filePath string) int {
+func (d *qemu) addFileDescriptor(fdFiles *[]string, filePath string) int {
 	// Append the tap device file path to the list of files to be opened and passed to qemu.
 	*fdFiles = append(*fdFiles, filePath)
 	return 2 + len(*fdFiles) // Use 2+fdFiles count, as first user file descriptor is 3.
 }
 
 // addRootDriveConfig adds the qemu config required for adding the root drive.
-func (vm *qemu) addRootDriveConfig(sb *strings.Builder, mountInfo *storagePools.MountInfo, bootIndexes map[string]int, rootDriveConf deviceConfig.MountEntryItem) error {
+func (d *qemu) addRootDriveConfig(sb *strings.Builder, mountInfo *storagePools.MountInfo, bootIndexes map[string]int, rootDriveConf deviceConfig.MountEntryItem) error {
 	if rootDriveConf.TargetPath != "/" {
 		return fmt.Errorf("Non-root drive config supplied")
 	}
 
-	pool, err := vm.getStoragePool()
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return err
 	}
@@ -2162,11 +2162,11 @@ func (vm *qemu) addRootDriveConfig(sb *strings.Builder, mountInfo *storagePools.
 		driveConf.Opts = append(driveConf.Opts, qemuUnsafeIO)
 	}
 
-	return vm.addDriveConfig(sb, bootIndexes, driveConf)
+	return d.addDriveConfig(sb, bootIndexes, driveConf)
 }
 
 // addDriveDirConfig adds the qemu config required for adding a supplementary drive directory share.
-func (vm *qemu) addDriveDirConfig(sb *strings.Builder, bus *qemuBus, fdFiles *[]string, agentMounts *[]instancetype.VMAgentMount, driveConf deviceConfig.MountEntryItem) error {
+func (d *qemu) addDriveDirConfig(sb *strings.Builder, bus *qemuBus, fdFiles *[]string, agentMounts *[]instancetype.VMAgentMount, driveConf deviceConfig.MountEntryItem) error {
 	mountTag := fmt.Sprintf("lxd_%s", driveConf.DevName)
 
 	agentMount := instancetype.VMAgentMount{
@@ -2189,7 +2189,7 @@ func (vm *qemu) addDriveDirConfig(sb *strings.Builder, bus *qemuBus, fdFiles *[]
 	// Record the 9p mount for the agent.
 	*agentMounts = append(*agentMounts, agentMount)
 
-	sockPath := filepath.Join(vm.Path(), fmt.Sprintf("%s.sock", driveConf.DevName))
+	sockPath := filepath.Join(d.Path(), fmt.Sprintf("%s.sock", driveConf.DevName))
 
 	if shared.PathExists(sockPath) {
 		devBus, devAddr, multi := bus.allocate(busFunctionGroup9p)
@@ -2230,7 +2230,7 @@ func (vm *qemu) addDriveDirConfig(sb *strings.Builder, bus *qemuBus, fdFiles *[]
 	}
 
 	// Only use proxy for writable shares.
-	proxyFD := vm.addFileDescriptor(fdFiles, driveConf.DevPath)
+	proxyFD := d.addFileDescriptor(fdFiles, driveConf.DevPath)
 	return qemuDriveDir.Execute(sb, map[string]interface{}{
 		"bus":           bus.name,
 		"devBus":        devBus,
@@ -2246,7 +2246,7 @@ func (vm *qemu) addDriveDirConfig(sb *strings.Builder, bus *qemuBus, fdFiles *[]
 }
 
 // addDriveConfig adds the qemu config required for adding a supplementary drive.
-func (vm *qemu) addDriveConfig(sb *strings.Builder, bootIndexes map[string]int, driveConf deviceConfig.MountEntryItem) error {
+func (d *qemu) addDriveConfig(sb *strings.Builder, bootIndexes map[string]int, driveConf deviceConfig.MountEntryItem) error {
 	// Use native kernel async IO and O_DIRECT by default.
 	aioMode := "native"
 	cacheMode := "none" // Bypass host cache, use O_DIRECT semantics.
@@ -2277,7 +2277,7 @@ func (vm *qemu) addDriveConfig(sb *strings.Builder, bootIndexes map[string]int,
 	}
 
 	if !strings.HasPrefix(driveConf.DevPath, "rbd:") {
-		vm.devPaths = append(vm.devPaths, driveConf.DevPath)
+		d.devPaths = append(d.devPaths, driveConf.DevPath)
 	}
 
 	return qemuDrive.Execute(sb, map[string]interface{}{
@@ -2291,7 +2291,7 @@ func (vm *qemu) addDriveConfig(sb *strings.Builder, bootIndexes map[string]int,
 }
 
 // addNetDevConfig adds the qemu config required for adding a network device.
-func (vm *qemu) addNetDevConfig(sb *strings.Builder, bus *qemuBus, bootIndexes map[string]int, nicConfig []deviceConfig.RunConfigItem, fdFiles *[]string) error {
+func (d *qemu) addNetDevConfig(sb *strings.Builder, bus *qemuBus, bootIndexes map[string]int, nicConfig []deviceConfig.RunConfigItem, fdFiles *[]string) error {
 	var devName, nicName, devHwaddr, pciSlotName string
 	for _, nicItem := range nicConfig {
 		if nicItem.Key == "devName" {
@@ -2327,7 +2327,7 @@ func (vm *qemu) addNetDevConfig(sb *strings.Builder, bus *qemuBus, bootIndexes m
 		}
 
 		// Append the tap device file path to the list of files to be opened and passed to qemu.
-		tplFields["tapFD"] = vm.addFileDescriptor(fdFiles, fmt.Sprintf("/dev/tap%d", ifindex))
+		tplFields["tapFD"] = d.addFileDescriptor(fdFiles, fmt.Sprintf("/dev/tap%d", ifindex))
 		tpl = qemuNetDevTapFD
 	} else if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/tun_flags", nicName)) {
 		// Detect TAP (via TUN driver) device.
@@ -2351,7 +2351,7 @@ func (vm *qemu) addNetDevConfig(sb *strings.Builder, bus *qemuBus, bootIndexes m
 }
 
 // addGPUDevConfig adds the qemu config required for adding a GPU device.
-func (vm *qemu) addGPUDevConfig(sb *strings.Builder, bus *qemuBus, gpuConfig []deviceConfig.RunConfigItem) error {
+func (d *qemu) addGPUDevConfig(sb *strings.Builder, bus *qemuBus, gpuConfig []deviceConfig.RunConfigItem) error {
 	var devName, pciSlotName, vgpu string
 	for _, gpuItem := range gpuConfig {
 		if gpuItem.Key == "devName" {
@@ -2364,7 +2364,7 @@ func (vm *qemu) addGPUDevConfig(sb *strings.Builder, bus *qemuBus, gpuConfig []d
 	}
 
 	// Pass-through VGA mode if enabled on the host device and architecture is x86_64.
-	vgaMode := shared.PathExists(filepath.Join("/sys/bus/pci/devices", pciSlotName, "boot_vga")) && vm.architecture == osarch.ARCH_64BIT_INTEL_X86
+	vgaMode := shared.PathExists(filepath.Join("/sys/bus/pci/devices", pciSlotName, "boot_vga")) && d.architecture == osarch.ARCH_64BIT_INTEL_X86
 
 	devBus, devAddr, multi := bus.allocate(fmt.Sprintf("lxd_%s", devName))
 	tplFields := map[string]interface{}{
@@ -2439,7 +2439,7 @@ func (vm *qemu) addGPUDevConfig(sb *strings.Builder, bus *qemuBus, gpuConfig []d
 	return nil
 }
 
-func (vm *qemu) addUSBDeviceConfig(sb *strings.Builder, bus *qemuBus, usbConfig []deviceConfig.RunConfigItem) error {
+func (d *qemu) addUSBDeviceConfig(sb *strings.Builder, bus *qemuBus, usbConfig []deviceConfig.RunConfigItem) error {
 	var devName, hostDevice string
 
 	for _, usbItem := range usbConfig {
@@ -2461,12 +2461,12 @@ func (vm *qemu) addUSBDeviceConfig(sb *strings.Builder, bus *qemuBus, usbConfig
 	}
 
 	// Add path to devPaths. This way, the path will be included in the apparmor profile.
-	vm.devPaths = append(vm.devPaths, hostDevice)
+	d.devPaths = append(d.devPaths, hostDevice)
 
 	return nil
 }
 
-func (vm *qemu) addTPMDeviceConfig(sb *strings.Builder, tpmConfig []deviceConfig.RunConfigItem) error {
+func (d *qemu) addTPMDeviceConfig(sb *strings.Builder, tpmConfig []deviceConfig.RunConfigItem) error {
 	var devName, socketPath string
 
 	for _, tpmItem := range tpmConfig {
@@ -2491,13 +2491,13 @@ func (vm *qemu) addTPMDeviceConfig(sb *strings.Builder, tpmConfig []deviceConfig
 }
 
 // pidFilePath returns the path where the qemu process should write its PID.
-func (vm *qemu) pidFilePath() string {
-	return filepath.Join(vm.LogPath(), "qemu.pid")
+func (d *qemu) pidFilePath() string {
+	return filepath.Join(d.LogPath(), "qemu.pid")
 }
 
 // pid gets the PID of the running qemu process.
-func (vm *qemu) pid() (int, error) {
-	pidStr, err := ioutil.ReadFile(vm.pidFilePath())
+func (d *qemu) pid() (int, error) {
+	pidStr, err := ioutil.ReadFile(d.pidFilePath())
 	if os.IsNotExist(err) {
 		return 0, nil
 	}
@@ -2515,9 +2515,9 @@ func (vm *qemu) pid() (int, error) {
 }
 
 // Stop the VM.
-func (vm *qemu) Stop(stateful bool) error {
+func (d *qemu) Stop(stateful bool) error {
 	// Check that we're not already stopped.
-	if !vm.IsRunning() {
+	if !d.IsRunning() {
 		return fmt.Errorf("The instance is already stopped")
 	}
 
@@ -2527,13 +2527,13 @@ func (vm *qemu) Stop(stateful bool) error {
 	}
 
 	// Setup a new operation.
-	op, err := operationlock.Create(vm.id, "stop", false, true)
+	op, err := operationlock.Create(d.id, "stop", false, true)
 	if err != nil {
 		return err
 	}
 
 	// Connect to the monitor.
-	monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
+	monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler())
 	if err != nil {
 		// If we fail to connect, it's most likely because the VM is already off.
 		op.Done(nil)
@@ -2569,18 +2569,18 @@ func (vm *qemu) Stop(stateful bool) error {
 
 	// Wait for onStop.
 	err = op.Wait()
-	if err != nil && vm.IsRunning() {
+	if err != nil && d.IsRunning() {
 		return err
 	}
 
-	vm.state.Events.SendLifecycle(vm.project, "virtual-machine-stopped", fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
+	d.state.Events.SendLifecycle(d.project, "virtual-machine-stopped", fmt.Sprintf("/1.0/virtual-machines/%s", d.name), nil)
 	return nil
 }
 
 // Unfreeze restores the instance to running.
-func (vm *qemu) Unfreeze() error {
+func (d *qemu) Unfreeze() error {
 	// Connect to the monitor.
-	monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
+	monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler())
 	if err != nil {
 		return err
 	}
@@ -2595,12 +2595,12 @@ func (vm *qemu) Unfreeze() error {
 }
 
 // IsPrivileged does not apply to virtual machines. Always returns false.
-func (vm *qemu) IsPrivileged() bool {
+func (d *qemu) IsPrivileged() bool {
 	return false
 }
 
 // Restore restores an instance snapshot.
-func (vm *qemu) Restore(source instance.Instance, stateful bool) error {
+func (d *qemu) Restore(source instance.Instance, stateful bool) error {
 	if stateful {
 		return fmt.Errorf("Stateful snapshots of VMs aren't supported yet")
 	}
@@ -2609,25 +2609,25 @@ func (vm *qemu) Restore(source instance.Instance, stateful bool) error {
 
 	// Stop the instance.
 	wasRunning := false
-	if vm.IsRunning() {
+	if d.IsRunning() {
 		wasRunning = true
 
-		ephemeral := vm.IsEphemeral()
+		ephemeral := d.IsEphemeral()
 		if ephemeral {
 			// Unset ephemeral flag.
 			args := db.InstanceArgs{
-				Architecture: vm.Architecture(),
-				Config:       vm.LocalConfig(),
-				Description:  vm.Description(),
-				Devices:      vm.LocalDevices(),
+				Architecture: d.Architecture(),
+				Config:       d.LocalConfig(),
+				Description:  d.Description(),
+				Devices:      d.LocalDevices(),
 				Ephemeral:    false,
-				Profiles:     vm.Profiles(),
-				Project:      vm.Project(),
-				Type:         vm.Type(),
-				Snapshot:     vm.IsSnapshot(),
+				Profiles:     d.Profiles(),
+				Project:      d.Project(),
+				Type:         d.Type(),
+				Snapshot:     d.IsSnapshot(),
 			}
 
-			err := vm.Update(args, false)
+			err := d.Update(args, false)
 			if err != nil {
 				return err
 			}
@@ -2635,42 +2635,42 @@ func (vm *qemu) Restore(source instance.Instance, stateful bool) error {
 			// On function return, set the flag back on.
 			defer func() {
 				args.Ephemeral = ephemeral
-				vm.Update(args, false)
+				d.Update(args, false)
 			}()
 		}
 
 		// This will unmount the instance storage.
-		err := vm.Stop(false)
+		err := d.Stop(false)
 		if err != nil {
 			return err
 		}
 	}
 
 	ctxMap = log.Ctx{
-		"project":   vm.project,
-		"name":      vm.name,
-		"created":   vm.creationDate,
-		"ephemeral": vm.ephemeral,
-		"used":      vm.lastUsedDate,
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate,
 		"source":    source.Name()}
 
 	logger.Info("Restoring instance", ctxMap)
 
 	// Load the storage driver.
-	pool, err := storagePools.GetPoolByInstance(vm.state, vm)
+	pool, err := storagePools.GetPoolByInstance(d.state, d)
 	if err != nil {
 		return err
 	}
 
 	// Ensure that storage is mounted for backup.yaml updates.
-	_, err = pool.MountInstance(vm, nil)
+	_, err = pool.MountInstance(d, nil)
 	if err != nil {
 		return err
 	}
-	defer pool.UnmountInstance(vm, nil)
+	defer pool.UnmountInstance(d, nil)
 
 	// Restore the rootfs.
-	err = pool.RestoreInstanceSnapshot(vm, source, nil)
+	err = pool.RestoreInstanceSnapshot(d, source, nil)
 	if err != nil {
 		return err
 	}
@@ -2689,7 +2689,7 @@ func (vm *qemu) Restore(source instance.Instance, stateful bool) error {
 	}
 
 	// Don't pass as user-requested as there's no way to fix a bad config.
-	err = vm.Update(args, false)
+	err = d.Update(args, false)
 	if err != nil {
 		logger.Error("Failed restoring instance configuration", ctxMap)
 		return err
@@ -2697,17 +2697,17 @@ func (vm *qemu) Restore(source instance.Instance, stateful bool) error {
 
 	// The old backup file may be out of date (e.g. it doesn't have all the current snapshots of
 	// the instance listed); let's write a new one to be safe.
-	err = vm.UpdateBackupFile()
+	err = d.UpdateBackupFile()
 	if err != nil {
 		return err
 	}
 
-	vm.state.Events.SendLifecycle(vm.project, "virtual-machine-snapshot-restored", fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), map[string]interface{}{"snapshot_name": vm.name})
+	d.state.Events.SendLifecycle(d.project, "virtual-machine-snapshot-restored", fmt.Sprintf("/1.0/virtual-machines/%s", d.name), map[string]interface{}{"snapshot_name": d.name})
 
 	// Restart the instance.
 	if wasRunning {
 		logger.Info("Restored instance", ctxMap)
-		return vm.Start(false)
+		return d.Start(false)
 	}
 
 	logger.Info("Restored instance", ctxMap)
@@ -2715,17 +2715,17 @@ func (vm *qemu) Restore(source instance.Instance, stateful bool) error {
 }
 
 // Snapshots returns a list of snapshots.
-func (vm *qemu) Snapshots() ([]instance.Instance, error) {
+func (d *qemu) Snapshots() ([]instance.Instance, error) {
 	var snaps []db.Instance
 
-	if vm.IsSnapshot() {
+	if d.IsSnapshot() {
 		return []instance.Instance{}, nil
 	}
 
 	// Get all the snapshots
-	err := vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+	err := d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		var err error
-		snaps, err = tx.GetInstanceSnapshotsWithName(vm.Project(), vm.name)
+		snaps, err = tx.GetInstanceSnapshotsWithName(d.Project(), d.name)
 		if err != nil {
 			return err
 		}
@@ -2737,7 +2737,7 @@ func (vm *qemu) Snapshots() ([]instance.Instance, error) {
 	}
 
 	// Build the snapshot list
-	snapshots, err := instance.LoadAllInternal(vm.state, snaps)
+	snapshots, err := instance.LoadAllInternal(d.state, snaps)
 	if err != nil {
 		return nil, err
 	}
@@ -2751,57 +2751,57 @@ func (vm *qemu) Snapshots() ([]instance.Instance, error) {
 }
 
 // Backups returns a list of backups.
-func (vm *qemu) Backups() ([]backup.InstanceBackup, error) {
+func (d *qemu) Backups() ([]backup.InstanceBackup, error) {
 	return []backup.InstanceBackup{}, nil
 }
 
 // Rename the instance.
-func (vm *qemu) Rename(newName string) error {
-	oldName := vm.Name()
+func (d *qemu) Rename(newName string) error {
+	oldName := d.Name()
 	ctxMap := log.Ctx{
-		"project":   vm.project,
-		"name":      vm.name,
-		"created":   vm.creationDate,
-		"ephemeral": vm.ephemeral,
-		"used":      vm.lastUsedDate,
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate,
 		"newname":   newName}
 
 	logger.Info("Renaming instance", ctxMap)
 
 	// Sanity checks.
-	err := instance.ValidName(newName, vm.IsSnapshot())
+	err := instance.ValidName(newName, d.IsSnapshot())
 	if err != nil {
 		return err
 	}
 
-	if vm.IsRunning() {
+	if d.IsRunning() {
 		return fmt.Errorf("Renaming of running instance not allowed")
 	}
 
 	// Clean things up.
-	vm.cleanup()
+	d.cleanup()
 
-	pool, err := storagePools.GetPoolByInstance(vm.state, vm)
+	pool, err := storagePools.GetPoolByInstance(d.state, d)
 	if err != nil {
 		return errors.Wrap(err, "Load instance storage pool")
 	}
 
-	if vm.IsSnapshot() {
+	if d.IsSnapshot() {
 		_, newSnapName, _ := shared.InstanceGetParentAndSnapshotName(newName)
-		err = pool.RenameInstanceSnapshot(vm, newSnapName, nil)
+		err = pool.RenameInstanceSnapshot(d, newSnapName, nil)
 		if err != nil {
 			return errors.Wrap(err, "Rename instance snapshot")
 		}
 	} else {
-		err = pool.RenameInstance(vm, newName, nil)
+		err = pool.RenameInstance(d, newName, nil)
 		if err != nil {
 			return errors.Wrap(err, "Rename instance")
 		}
 	}
 
-	if !vm.IsSnapshot() {
+	if !d.IsSnapshot() {
 		// Rename all the instance snapshot database entries.
-		results, err := vm.state.Cluster.GetInstanceSnapshotsNames(vm.project, oldName)
+		results, err := d.state.Cluster.GetInstanceSnapshotsNames(d.project, oldName)
 		if err != nil {
 			logger.Error("Failed to get instance snapshots", ctxMap)
 			return err
@@ -2811,8 +2811,8 @@ func (vm *qemu) Rename(newName string) error {
 			// Rename the snapshot.
 			oldSnapName := strings.SplitN(sname, shared.SnapshotDelimiter, 2)[1]
 			baseSnapName := filepath.Base(sname)
-			err := vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-				return tx.RenameInstanceSnapshot(vm.project, oldName, oldSnapName, baseSnapName)
+			err := d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+				return tx.RenameInstanceSnapshot(d.project, oldName, oldSnapName, baseSnapName)
 			})
 			if err != nil {
 				logger.Error("Failed renaming snapshot", ctxMap)
@@ -2822,14 +2822,14 @@ func (vm *qemu) Rename(newName string) error {
 	}
 
 	// Rename the instance database entry.
-	err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		if vm.IsSnapshot() {
+	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		if d.IsSnapshot() {
 			oldParts := strings.SplitN(oldName, shared.SnapshotDelimiter, 2)
 			newParts := strings.SplitN(newName, shared.SnapshotDelimiter, 2)
-			return tx.RenameInstanceSnapshot(vm.project, oldParts[0], oldParts[1], newParts[1])
+			return tx.RenameInstanceSnapshot(d.project, oldParts[0], oldParts[1], newParts[1])
 		}
 
-		return tx.RenameInstance(vm.project, oldName, newName)
+		return tx.RenameInstance(d.project, oldName, newName)
 	})
 	if err != nil {
 		logger.Error("Failed renaming instance", ctxMap)
@@ -2837,10 +2837,10 @@ func (vm *qemu) Rename(newName string) error {
 	}
 
 	// Rename the logging path.
-	newFullName := project.Instance(vm.Project(), vm.Name())
+	newFullName := project.Instance(d.Project(), d.Name())
 	os.RemoveAll(shared.LogPath(newFullName))
-	if shared.PathExists(vm.LogPath()) {
-		err := os.Rename(vm.LogPath(), shared.LogPath(newFullName))
+	if shared.PathExists(d.LogPath()) {
+		err := os.Rename(d.LogPath(), shared.LogPath(newFullName))
 		if err != nil {
 			logger.Error("Failed renaming instance", ctxMap)
 			return err
@@ -2848,8 +2848,8 @@ func (vm *qemu) Rename(newName string) error {
 	}
 
 	// Rename the MAAS entry.
-	if !vm.IsSnapshot() {
-		err = vm.maasRename(newName)
+	if !d.IsSnapshot() {
+		err = d.maasRename(newName)
 		if err != nil {
 			return err
 		}
@@ -2859,11 +2859,11 @@ func (vm *qemu) Rename(newName string) error {
 	defer revert.Fail()
 
 	// Set the new name in the struct.
-	vm.name = newName
-	revert.Add(func() { vm.name = oldName })
+	d.name = newName
+	revert.Add(func() { d.name = oldName })
 
 	// Rename the backups.
-	backups, err := vm.Backups()
+	backups, err := d.Backups()
 	if err != nil {
 		return err
 	}
@@ -2883,23 +2883,23 @@ func (vm *qemu) Rename(newName string) error {
 	}
 
 	// Update lease files.
-	network.UpdateDNSMasqStatic(vm.state, "")
+	network.UpdateDNSMasqStatic(d.state, "")
 
-	err = vm.UpdateBackupFile()
+	err = d.UpdateBackupFile()
 	if err != nil {
 		return err
 	}
 
 	logger.Info("Renamed instance", ctxMap)
 
-	if vm.IsSnapshot() {
-		vm.state.Events.SendLifecycle(vm.project, "virtual-machine-snapshot-renamed",
+	if d.IsSnapshot() {
+		d.state.Events.SendLifecycle(d.project, "virtual-machine-snapshot-renamed",
 			fmt.Sprintf("/1.0/virtual-machines/%s", oldName), map[string]interface{}{
 				"new_name":      newName,
 				"snapshot_name": oldName,
 			})
 	} else {
-		vm.state.Events.SendLifecycle(vm.project, "virtual-machine-renamed",
+		d.state.Events.SendLifecycle(d.project, "virtual-machine-renamed",
 			fmt.Sprintf("/1.0/virtual-machines/%s", oldName), map[string]interface{}{
 				"new_name": newName,
 			})
@@ -2910,7 +2910,7 @@ func (vm *qemu) Rename(newName string) error {
 }
 
 // Update the instance config.
-func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
+func (d *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 	revert := revert.New()
 	defer revert.Fail()
 
@@ -2920,7 +2920,7 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 	}
 
 	if args.Architecture == 0 {
-		args.Architecture = vm.architecture
+		args.Architecture = d.architecture
 	}
 
 	if args.Config == nil {
@@ -2937,20 +2937,20 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 
 	if userRequested {
 		// Validate the new config.
-		err := instance.ValidConfig(vm.state.OS, args.Config, false, false)
+		err := instance.ValidConfig(d.state.OS, args.Config, false, false)
 		if err != nil {
 			return errors.Wrap(err, "Invalid config")
 		}
 
 		// Validate the new devices without using expanded devices validation (expensive checks disabled).
-		err = instance.ValidDevices(vm.state, vm.state.Cluster, vm.Project(), vm.Type(), args.Devices, false)
+		err = instance.ValidDevices(d.state, d.state.Cluster, d.Project(), d.Type(), args.Devices, false)
 		if err != nil {
 			return errors.Wrap(err, "Invalid devices")
 		}
 	}
 
 	// Validate the new profiles.
-	profiles, err := vm.state.Cluster.GetProfileNames(args.Project)
+	profiles, err := d.state.Cluster.GetProfileNames(args.Project)
 	if err != nil {
 		return errors.Wrap(err, "Failed to get profiles")
 	}
@@ -2977,80 +2977,80 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 	}
 
 	// Get a copy of the old configuration.
-	oldDescription := vm.Description()
+	oldDescription := d.Description()
 	oldArchitecture := 0
-	err = shared.DeepCopy(&vm.architecture, &oldArchitecture)
+	err = shared.DeepCopy(&d.architecture, &oldArchitecture)
 	if err != nil {
 		return err
 	}
 
 	oldEphemeral := false
-	err = shared.DeepCopy(&vm.ephemeral, &oldEphemeral)
+	err = shared.DeepCopy(&d.ephemeral, &oldEphemeral)
 	if err != nil {
 		return err
 	}
 
 	oldExpandedDevices := deviceConfig.Devices{}
-	err = shared.DeepCopy(&vm.expandedDevices, &oldExpandedDevices)
+	err = shared.DeepCopy(&d.expandedDevices, &oldExpandedDevices)
 	if err != nil {
 		return err
 	}
 
 	oldExpandedConfig := map[string]string{}
-	err = shared.DeepCopy(&vm.expandedConfig, &oldExpandedConfig)
+	err = shared.DeepCopy(&d.expandedConfig, &oldExpandedConfig)
 	if err != nil {
 		return err
 	}
 
 	oldLocalDevices := deviceConfig.Devices{}
-	err = shared.DeepCopy(&vm.localDevices, &oldLocalDevices)
+	err = shared.DeepCopy(&d.localDevices, &oldLocalDevices)
 	if err != nil {
 		return err
 	}
 
 	oldLocalConfig := map[string]string{}
-	err = shared.DeepCopy(&vm.localConfig, &oldLocalConfig)
+	err = shared.DeepCopy(&d.localConfig, &oldLocalConfig)
 	if err != nil {
 		return err
 	}
 
 	oldProfiles := []string{}
-	err = shared.DeepCopy(&vm.profiles, &oldProfiles)
+	err = shared.DeepCopy(&d.profiles, &oldProfiles)
 	if err != nil {
 		return err
 	}
 
-	oldExpiryDate := vm.expiryDate
+	oldExpiryDate := d.expiryDate
 
 	// Revert local changes if update fails.
 	revert.Add(func() {
-		vm.description = oldDescription
-		vm.architecture = oldArchitecture
-		vm.ephemeral = oldEphemeral
-		vm.expandedConfig = oldExpandedConfig
-		vm.expandedDevices = oldExpandedDevices
-		vm.localConfig = oldLocalConfig
-		vm.localDevices = oldLocalDevices
-		vm.profiles = oldProfiles
-		vm.expiryDate = oldExpiryDate
+		d.description = oldDescription
+		d.architecture = oldArchitecture
+		d.ephemeral = oldEphemeral
+		d.expandedConfig = oldExpandedConfig
+		d.expandedDevices = oldExpandedDevices
+		d.localConfig = oldLocalConfig
+		d.localDevices = oldLocalDevices
+		d.profiles = oldProfiles
+		d.expiryDate = oldExpiryDate
 	})
 
 	// Apply the various changes to local vars.
-	vm.description = args.Description
-	vm.architecture = args.Architecture
-	vm.ephemeral = args.Ephemeral
-	vm.localConfig = args.Config
-	vm.localDevices = args.Devices
-	vm.profiles = args.Profiles
-	vm.expiryDate = args.ExpiryDate
+	d.description = args.Description
+	d.architecture = args.Architecture
+	d.ephemeral = args.Ephemeral
+	d.localConfig = args.Config
+	d.localDevices = args.Devices
+	d.profiles = args.Profiles
+	d.expiryDate = args.ExpiryDate
 
 	// Expand the config.
-	err = vm.expandConfig(nil)
+	err = d.expandConfig(nil)
 	if err != nil {
 		return errors.Wrap(err, "Expand config")
 	}
 
-	err = vm.expandDevices(nil)
+	err = d.expandDevices(nil)
 	if err != nil {
 		return errors.Wrap(err, "Expand devices")
 	}
@@ -3058,15 +3058,15 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 	// Diff the configurations.
 	changedConfig := []string{}
 	for key := range oldExpandedConfig {
-		if oldExpandedConfig[key] != vm.expandedConfig[key] {
+		if oldExpandedConfig[key] != d.expandedConfig[key] {
 			if !shared.StringInSlice(key, changedConfig) {
 				changedConfig = append(changedConfig, key)
 			}
 		}
 	}
 
-	for key := range vm.expandedConfig {
-		if oldExpandedConfig[key] != vm.expandedConfig[key] {
+	for key := range d.expandedConfig {
+		if oldExpandedConfig[key] != d.expandedConfig[key] {
 			if !shared.StringInSlice(key, changedConfig) {
 				changedConfig = append(changedConfig, key)
 			}
@@ -3074,17 +3074,17 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 	}
 
 	// Diff the devices.
-	removeDevices, addDevices, updateDevices, allUpdatedKeys := oldExpandedDevices.Update(vm.expandedDevices, func(oldDevice deviceConfig.Device, newDevice deviceConfig.Device) []string {
+	removeDevices, addDevices, updateDevices, allUpdatedKeys := oldExpandedDevices.Update(d.expandedDevices, func(oldDevice deviceConfig.Device, newDevice deviceConfig.Device) []string {
 		// This function needs to return a list of fields that are excluded from differences
 		// between oldDevice and newDevice. The result of this is that as long as the
 		// devices are otherwise identical except for the fields returned here, then the
 		// device is considered to be being "updated" rather than "added & removed".
-		oldNICType, err := nictype.NICType(vm.state, vm.Project(), newDevice)
+		oldNICType, err := nictype.NICType(d.state, d.Project(), newDevice)
 		if err != nil {
 			return []string{} // Cannot hot-update due to config error.
 		}
 
-		newNICType, err := nictype.NICType(vm.state, vm.Project(), oldDevice)
+		newNICType, err := nictype.NICType(d.state, d.Project(), oldDevice)
 		if err != nil {
 			return []string{} // Cannot hot-update due to config error.
 		}
@@ -3093,7 +3093,7 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 			return []string{} // Device types aren't the same, so this cannot be an update.
 		}
 
-		dev, err := device.New(vm, vm.state, "", newDevice, nil, nil)
+		dev, err := device.New(d, d.state, "", newDevice, nil, nil)
 		if err != nil {
 			return []string{} // Couldn't create Device, so this cannot be an update.
 		}
@@ -3103,22 +3103,22 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 
 	if userRequested {
 		// Do some validation of the config diff.
-		err = instance.ValidConfig(vm.state.OS, vm.expandedConfig, false, true)
+		err = instance.ValidConfig(d.state.OS, d.expandedConfig, false, true)
 		if err != nil {
 			return errors.Wrap(err, "Invalid expanded config")
 		}
 
 		// Do full expanded validation of the devices diff.
-		err = instance.ValidDevices(vm.state, vm.state.Cluster, vm.Project(), vm.Type(), vm.expandedDevices, true)
+		err = instance.ValidDevices(d.state, d.state.Cluster, d.Project(), d.Type(), d.expandedDevices, true)
 		if err != nil {
 			return errors.Wrap(err, "Invalid expanded devices")
 		}
 	}
 
-	isRunning := vm.IsRunning()
+	isRunning := d.IsRunning()
 
 	// Use the device interface to apply update changes.
-	err = vm.updateDevices(removeDevices, addDevices, updateDevices, oldExpandedDevices, isRunning, userRequested)
+	err = d.updateDevices(removeDevices, addDevices, updateDevices, oldExpandedDevices, isRunning, userRequested)
 	if err != nil {
 		return err
 	}
@@ -3136,10 +3136,10 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 
 		// Apply live update for each key.
 		for _, key := range changedConfig {
-			value := vm.expandedConfig[key]
+			value := d.expandedConfig[key]
 
 			if key == "limits.memory" {
-				err = vm.updateMemoryLimit(value)
+				err = d.updateMemoryLimit(value)
 				if err != nil {
 					if err != nil {
 						return errors.Wrapf(err, "Failed updating memory limit")
@@ -3158,8 +3158,8 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 		}
 	}
 
-	if !vm.IsSnapshot() && updateMAAS {
-		err = vm.maasUpdate(oldExpandedDevices.CloneNative())
+	if !d.IsSnapshot() && updateMAAS {
+		err = d.maasUpdate(oldExpandedDevices.CloneNative())
 		if err != nil {
 			return err
 		}
@@ -3167,39 +3167,39 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 
 	if shared.StringInSlice("security.secureboot", changedConfig) {
 		// Re-generate the NVRAM.
-		err = vm.setupNvram()
+		err = d.setupNvram()
 		if err != nil {
 			return err
 		}
 	}
 
 	// Finally, apply the changes to the database.
-	err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		// Snapshots should update only their descriptions and expiry date.
-		if vm.IsSnapshot() {
-			return tx.UpdateInstanceSnapshot(vm.id, vm.description, vm.expiryDate)
+		if d.IsSnapshot() {
+			return tx.UpdateInstanceSnapshot(d.id, d.description, d.expiryDate)
 		}
 
-		object, err := tx.GetInstance(vm.project, vm.name)
+		object, err := tx.GetInstance(d.project, d.name)
 		if err != nil {
 			return err
 		}
 
-		object.Description = vm.description
-		object.Architecture = vm.architecture
-		object.Ephemeral = vm.ephemeral
-		object.ExpiryDate = vm.expiryDate
-		object.Config = vm.localConfig
-		object.Profiles = vm.profiles
-		object.Devices = vm.localDevices.CloneNative()
+		object.Description = d.description
+		object.Architecture = d.architecture
+		object.Ephemeral = d.ephemeral
+		object.ExpiryDate = d.expiryDate
+		object.Config = d.localConfig
+		object.Profiles = d.profiles
+		object.Devices = d.localDevices.CloneNative()
 
-		return tx.UpdateInstance(vm.project, vm.name, *object)
+		return tx.UpdateInstance(d.project, d.name, *object)
 	})
 	if err != nil {
 		return errors.Wrap(err, "Failed to update database")
 	}
 
-	err = vm.UpdateBackupFile()
+	err = d.UpdateBackupFile()
 	if err != nil && !os.IsNotExist(err) {
 		return errors.Wrap(err, "Failed to write backup file")
 	}
@@ -3208,7 +3208,7 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 	revert.Success()
 
 	if isRunning {
-		err = vm.writeInstanceData()
+		err = d.writeInstanceData()
 		if err != nil {
 			return errors.Wrap(err, "Failed to write instance-data file")
 		}
@@ -3222,10 +3222,10 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 			msg := map[string]string{
 				"key":       key,
 				"old_value": oldExpandedConfig[key],
-				"value":     vm.expandedConfig[key],
+				"value":     d.expandedConfig[key],
 			}
 
-			err = vm.devlxdEventSend("config", msg)
+			err = d.devlxdEventSend("config", msg)
 			if err != nil {
 				return err
 			}
@@ -3234,24 +3234,24 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
 
 	var endpoint string
 
-	if vm.IsSnapshot() {
-		parentName, snapName, _ := shared.InstanceGetParentAndSnapshotName(vm.name)
+	if d.IsSnapshot() {
+		parentName, snapName, _ := shared.InstanceGetParentAndSnapshotName(d.name)
 		endpoint = fmt.Sprintf("/1.0/virtual-machines/%s/snapshots/%s", parentName, snapName)
 	} else {
-		endpoint = fmt.Sprintf("/1.0/virtual-machines/%s", vm.name)
+		endpoint = fmt.Sprintf("/1.0/virtual-machines/%s", d.name)
 	}
 
-	vm.state.Events.SendLifecycle(vm.project, "virtual-machine-updated", endpoint, nil)
+	d.state.Events.SendLifecycle(d.project, "virtual-machine-updated", endpoint, nil)
 	return nil
 }
 
 // updateMemoryLimit live updates the VM's memory limit by resizing the balloon device.
-func (vm *qemu) updateMemoryLimit(newLimit string) error {
+func (d *qemu) updateMemoryLimit(newLimit string) error {
 	if newLimit == "" {
 		return nil
 	}
 
-	if shared.IsTrue(vm.expandedConfig["limits.memory.hugepages"]) {
+	if shared.IsTrue(d.expandedConfig["limits.memory.hugepages"]) {
 		return fmt.Errorf("Cannot live update memory limit when using huge pages")
 	}
 
@@ -3263,7 +3263,7 @@ func (vm *qemu) updateMemoryLimit(newLimit string) error {
 	newSizeMB := newSizeBytes / 1024 / 1024
 
 	// Connect to the monitor.
-	monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
+	monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler())
 	if err != nil {
 		return err // The VM isn't running as no monitor socket available.
 	}
@@ -3318,11 +3318,11 @@ func (vm *qemu) updateMemoryLimit(newLimit string) error {
 	return fmt.Errorf("Failed setting memory to %dMiB (currently %dMiB) as it was taking too long", newSizeMB, curSizeMB)
 }
 
-func (vm *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices deviceConfig.Devices, updateDevices deviceConfig.Devices, oldExpandedDevices deviceConfig.Devices, instanceRunning bool, userRequested bool) error {
+func (d *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices deviceConfig.Devices, updateDevices deviceConfig.Devices, oldExpandedDevices deviceConfig.Devices, instanceRunning bool, userRequested bool) error {
 	// Remove devices in reverse order to how they were added.
 	for _, dev := range removeDevices.Reversed() {
 		if instanceRunning {
-			err := vm.deviceStop(dev.Name, dev.Config, instanceRunning)
+			err := d.deviceStop(dev.Name, dev.Config, instanceRunning)
 			if err == device.ErrUnsupportedDevType {
 				continue // No point in trying to remove device below.
 			} else if err != nil {
@@ -3330,7 +3330,7 @@ func (vm *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices dev
 			}
 		}
 
-		err := vm.deviceRemove(dev.Name, dev.Config, instanceRunning)
+		err := d.deviceRemove(dev.Name, dev.Config, instanceRunning)
 		if err != nil && err != device.ErrUnsupportedDevType {
 			return errors.Wrapf(err, "Failed to remove device %q", dev.Name)
 		}
@@ -3338,7 +3338,7 @@ func (vm *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices dev
 		// Check whether we are about to add the same device back with updated config and
 		// if not, or if the device type has changed, then remove all volatile keys for
 	// this device (as it's an actual removal or a device type change).
-		err = vm.deviceResetVolatile(dev.Name, dev.Config, addDevices[dev.Name])
+		err = d.deviceResetVolatile(dev.Name, dev.Config, addDevices[dev.Name])
 		if err != nil {
 			return errors.Wrapf(err, "Failed to reset volatile data for device %q", dev.Name)
 		}
@@ -3346,7 +3346,7 @@ func (vm *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices dev
 
 	// Add devices in sorted order, this ensures that device mounts are added in path order.
 	for _, dev := range addDevices.Sorted() {
-		err := vm.deviceAdd(dev.Name, dev.Config, instanceRunning)
+		err := d.deviceAdd(dev.Name, dev.Config, instanceRunning)
 		if err == device.ErrUnsupportedDevType {
 			continue // No point in trying to start device below.
 		} else if err != nil {
@@ -3356,12 +3356,12 @@ func (vm *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices dev
 
 			// If update is non-user requested (i.e from a snapshot restore), there's nothing we can
 			// do to fix the config and we don't want to prevent the snapshot restore so log and allow.
-			logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "device": dev.Name, "err": err})
+			logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"project": d.Project(), "instance": d.Name(), "device": dev.Name, "err": err})
 			continue
 		}
 
 		if instanceRunning {
-			_, err := vm.deviceStart(dev.Name, dev.Config, instanceRunning)
+			_, err := d.deviceStart(dev.Name, dev.Config, instanceRunning)
 			if err != nil && err != device.ErrUnsupportedDevType {
 				return errors.Wrapf(err, "Failed to start device %q", dev.Name)
 			}
@@ -3369,7 +3369,7 @@ func (vm *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices dev
 	}
 
 	for _, dev := range updateDevices.Sorted() {
-		err := vm.deviceUpdate(dev.Name, dev.Config, oldExpandedDevices, instanceRunning)
+		err := d.deviceUpdate(dev.Name, dev.Config, oldExpandedDevices, instanceRunning)
 		if err != nil && err != device.ErrUnsupportedDevType {
 			return errors.Wrapf(err, "Failed to update device %q", dev.Name)
 		}
@@ -3379,8 +3379,8 @@ func (vm *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices dev
 }
 
 // deviceUpdate loads a new device and calls its Update() function.
-func (vm *qemu) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, instanceRunning bool) error {
-	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
+func (d *qemu) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, instanceRunning bool) error {
+	dev, _, err := d.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return err
 	}
@@ -3395,16 +3395,16 @@ func (vm *qemu) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, o
 
 // deviceResetVolatile resets a device's volatile data when it's removed or updated in such a way
 // that it is removed then added immediately afterwards.
-func (vm *qemu) deviceResetVolatile(devName string, oldConfig, newConfig deviceConfig.Device) error {
+func (d *qemu) deviceResetVolatile(devName string, oldConfig, newConfig deviceConfig.Device) error {
 	volatileClear := make(map[string]string)
 	devicePrefix := fmt.Sprintf("volatile.%s.", devName)
 
-	newNICType, err := nictype.NICType(vm.state, vm.Project(), newConfig)
+	newNICType, err := nictype.NICType(d.state, d.Project(), newConfig)
 	if err != nil {
 		return err
 	}
 
-	oldNICType, err := nictype.NICType(vm.state, vm.Project(), oldConfig)
+	oldNICType, err := nictype.NICType(d.state, d.Project(), oldConfig)
 	if err != nil {
 		return err
 	}
@@ -3413,7 +3413,7 @@ func (vm *qemu) deviceResetVolatile(devName string, oldConfig, newConfig deviceC
 	// This will occur if the newConfig is empty (i.e the device is actually being removed) or
 	// if the device type is being changed but keeping the same name.
 	if newConfig["type"] != oldConfig["type"] || newNICType != oldNICType {
-		for k := range vm.localConfig {
+		for k := range d.localConfig {
 			if !strings.HasPrefix(k, devicePrefix) {
 				continue
 			}
@@ -3421,13 +3421,13 @@ func (vm *qemu) deviceResetVolatile(devName string, oldConfig, newConfig deviceC
 			volatileClear[k] = ""
 		}
 
-		return vm.VolatileSet(volatileClear)
+		return d.VolatileSet(volatileClear)
 	}
 
 	// If the device type remains the same, then just remove any volatile keys that have
 	// the same key name present in the new config (i.e the new config is replacing the
 	// old volatile key).
-	for k := range vm.localConfig {
+	for k := range d.localConfig {
 		if !strings.HasPrefix(k, devicePrefix) {
 			continue
 		}
@@ -3438,17 +3438,17 @@ func (vm *qemu) deviceResetVolatile(devName string, oldConfig, newConfig deviceC
 		}
 	}
 
-	return vm.VolatileSet(volatileClear)
+	return d.VolatileSet(volatileClear)
 }
 
-func (vm *qemu) removeUnixDevices() error {
+func (d *qemu) removeUnixDevices() error {
 	// Check that we indeed have devices to remove.
-	if !shared.PathExists(vm.DevicesPath()) {
+	if !shared.PathExists(d.DevicesPath()) {
 		return nil
 	}
 
 	// Load the directory listing.
-	dents, err := ioutil.ReadDir(vm.DevicesPath())
+	dents, err := ioutil.ReadDir(d.DevicesPath())
 	if err != nil {
 		return err
 	}
@@ -3461,7 +3461,7 @@ func (vm *qemu) removeUnixDevices() error {
 		}
 
 		// Remove the entry
-		devicePath := filepath.Join(vm.DevicesPath(), f.Name())
+		devicePath := filepath.Join(d.DevicesPath(), f.Name())
 		err := os.Remove(devicePath)
 		if err != nil {
 			logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
@@ -3471,14 +3471,14 @@ func (vm *qemu) removeUnixDevices() error {
 	return nil
 }
 
-func (vm *qemu) removeDiskDevices() error {
+func (d *qemu) removeDiskDevices() error {
 	// Check that we indeed have devices to remove.
-	if !shared.PathExists(vm.DevicesPath()) {
+	if !shared.PathExists(d.DevicesPath()) {
 		return nil
 	}
 
 	// Load the directory listing.
-	dents, err := ioutil.ReadDir(vm.DevicesPath())
+	dents, err := ioutil.ReadDir(d.DevicesPath())
 	if err != nil {
 		return err
 	}
@@ -3491,10 +3491,10 @@ func (vm *qemu) removeDiskDevices() error {
 		}
 
 		// Always try to unmount the host side
-		_ = unix.Unmount(filepath.Join(vm.DevicesPath(), f.Name()), unix.MNT_DETACH)
+		_ = unix.Unmount(filepath.Join(d.DevicesPath(), f.Name()), unix.MNT_DETACH)
 
 		// Remove the entry
-		diskPath := filepath.Join(vm.DevicesPath(), f.Name())
+		diskPath := filepath.Join(d.DevicesPath(), f.Name())
 		err := os.Remove(diskPath)
 		if err != nil {
 			logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
@@ -3504,26 +3504,26 @@ func (vm *qemu) removeDiskDevices() error {
 	return nil
 }
 
-func (vm *qemu) cleanup() {
+func (d *qemu) cleanup() {
 	// Unmount any leftovers
-	vm.removeUnixDevices()
-	vm.removeDiskDevices()
+	d.removeUnixDevices()
+	d.removeDiskDevices()
 
 	// Remove the security profiles
-	apparmor.InstanceDelete(vm.state, vm)
+	apparmor.InstanceDelete(d.state, d)
 
 	// Remove the devices path
-	os.Remove(vm.DevicesPath())
+	os.Remove(d.DevicesPath())
 
 	// Remove the shmounts path
-	os.RemoveAll(vm.ShmountsPath())
+	os.RemoveAll(d.ShmountsPath())
 }
 
 // cleanupDevices performs any needed device cleanup steps when instance is stopped.
-func (vm *qemu) cleanupDevices() {
-	for _, dev := range vm.expandedDevices.Reversed() {
+func (d *qemu) cleanupDevices() {
+	for _, dev := range d.expandedDevices.Reversed() {
 		// Use the device interface if device supports it.
-		err := vm.deviceStop(dev.Name, dev.Config, false)
+		err := d.deviceStop(dev.Name, dev.Config, false)
 		if err == device.ErrUnsupportedDevType {
 			continue
 		} else if err != nil {
@@ -3532,14 +3532,14 @@ func (vm *qemu) cleanupDevices() {
 	}
 }
 
-func (vm *qemu) init() error {
+func (d *qemu) init() error {
 	// Compute the expanded config and device list.
-	err := vm.expandConfig(nil)
+	err := d.expandConfig(nil)
 	if err != nil {
 		return err
 	}
 
-	err = vm.expandDevices(nil)
+	err = d.expandDevices(nil)
 	if err != nil {
 		return err
 	}
@@ -3548,18 +3548,18 @@ func (vm *qemu) init() error {
 }
 
 // Delete the instance.
-func (vm *qemu) Delete() error {
+func (d *qemu) Delete() error {
 	ctxMap := log.Ctx{
-		"project":   vm.project,
-		"name":      vm.name,
-		"created":   vm.creationDate,
-		"ephemeral": vm.ephemeral,
-		"used":      vm.lastUsedDate}
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate}
 
 	logger.Info("Deleting instance", ctxMap)
 
 	// Check if instance is delete protected.
-	if shared.IsTrue(vm.expandedConfig["security.protection.delete"]) && !vm.IsSnapshot() {
+	if shared.IsTrue(d.expandedConfig["security.protection.delete"]) && !d.IsSnapshot() {
 		return fmt.Errorf("Instance is protected")
 	}
 
@@ -3568,14 +3568,14 @@ func (vm *qemu) Delete() error {
 	isImport := false
 
 	// Attempt to initialize storage interface for the instance.
-	pool, err := vm.getStoragePool()
+	pool, err := d.getStoragePool()
 	if err != nil && err != db.ErrNoSuchObject {
 		return err
 	} else if pool != nil {
-		if vm.IsSnapshot() {
+		if d.IsSnapshot() {
 			if !isImport {
 				// Remove snapshot volume and database record.
-				err = pool.DeleteInstanceSnapshot(vm, nil)
+				err = pool.DeleteInstanceSnapshot(d, nil)
 				if err != nil {
 					return err
 				}
@@ -3583,14 +3583,14 @@ func (vm *qemu) Delete() error {
 		} else {
 			// Remove all snapshots by initialising each snapshot as an Instance and
 			// calling its Delete function.
-			err := instance.DeleteSnapshots(vm.state, vm.Project(), vm.Name())
+			err := instance.DeleteSnapshots(d.state, d.Project(), d.Name())
 			if err != nil {
 				return err
 			}
 
 			if !isImport {
 				// Remove the storage volume, snapshot volumes and database records.
-				err = pool.DeleteInstance(vm, nil)
+				err = pool.DeleteInstance(d, nil)
 				if err != nil {
 					return err
 				}
@@ -3599,9 +3599,9 @@ func (vm *qemu) Delete() error {
 	}
 
 	// Perform other cleanup steps if not snapshot.
-	if !vm.IsSnapshot() {
+	if !d.IsSnapshot() {
 		// Remove all backups.
-		backups, err := vm.Backups()
+		backups, err := d.Backups()
 		if err != nil {
 			return err
 		}
@@ -3614,47 +3614,47 @@ func (vm *qemu) Delete() error {
 		}
 
 		// Delete the MAAS entry.
-		err = vm.maasDelete()
+		err = d.maasDelete()
 		if err != nil {
-			logger.Error("Failed deleting instance MAAS record", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
+			logger.Error("Failed deleting instance MAAS record", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 			return err
 		}
 
 		// Run device removal function for each device.
-		for k, m := range vm.expandedDevices {
-			err = vm.deviceRemove(k, m, false)
+		for k, m := range d.expandedDevices {
+			err = d.deviceRemove(k, m, false)
 			if err != nil && err != device.ErrUnsupportedDevType {
 				return errors.Wrapf(err, "Failed to remove device %q", k)
 			}
 		}
 
 		// Clean things up.
-		vm.cleanup()
+		d.cleanup()
 	}
 
 	// Remove the database record of the instance or snapshot instance.
-	if err := vm.state.Cluster.DeleteInstance(vm.Project(), vm.Name()); err != nil {
-		logger.Error("Failed deleting instance entry", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
+	if err := d.state.Cluster.DeleteInstance(d.Project(), d.Name()); err != nil {
+		logger.Error("Failed deleting instance entry", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 		return err
 	}
 
 	logger.Info("Deleted instance", ctxMap)
 
-	if vm.IsSnapshot() {
-		vm.state.Events.SendLifecycle(vm.project, "virtual-machine-snapshot-deleted",
-			fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), map[string]interface{}{
-				"snapshot_name": vm.name,
+	if d.IsSnapshot() {
+		d.state.Events.SendLifecycle(d.project, "virtual-machine-snapshot-deleted",
+			fmt.Sprintf("/1.0/virtual-machines/%s", d.name), map[string]interface{}{
+				"snapshot_name": d.name,
 			})
 	} else {
-		vm.state.Events.SendLifecycle(vm.project, "virtual-machine-deleted",
-			fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
+		d.state.Events.SendLifecycle(d.project, "virtual-machine-deleted",
+			fmt.Sprintf("/1.0/virtual-machines/%s", d.name), nil)
 	}
 
 	return nil
 }
 
-func (vm *qemu) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
-	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
+func (d *qemu) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
+	dev, _, err := d.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return err
 	}
@@ -3666,10 +3666,10 @@ func (vm *qemu) deviceAdd(deviceName string, rawConfig deviceConfig.Device, inst
 	return dev.Add()
 }
 
-func (vm *qemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
-	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": vm.Project(), "instance": vm.Name()})
+func (d *qemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
+	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
 
-	dev, _, err := vm.deviceLoad(deviceName, rawConfig)
+	dev, _, err := d.deviceLoad(deviceName, rawConfig)
 
 	// If deviceLoad fails with unsupported device type then return.
 	if err == device.ErrUnsupportedDevType {
@@ -3696,35 +3696,35 @@ func (vm *qemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device, i
 }
 
 // Export publishes the instance.
-func (vm *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetadata, error) {
+func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetadata, error) {
 	ctxMap := log.Ctx{
-		"project":   vm.project,
-		"name":      vm.name,
-		"created":   vm.creationDate,
-		"ephemeral": vm.ephemeral,
-		"used":      vm.lastUsedDate}
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate}
 
 	meta := api.ImageMetadata{}
 
-	if vm.IsRunning() {
+	if d.IsRunning() {
 		return meta, fmt.Errorf("Cannot export a running instance as an image")
 	}
 
 	logger.Info("Exporting instance", ctxMap)
 
 	// Start the storage.
-	mountInfo, err := vm.mount()
+	mountInfo, err := d.mount()
 	if err != nil {
 		logger.Error("Failed exporting instance", ctxMap)
 		return meta, err
 	}
-	defer vm.unmount()
+	defer d.unmount()
 
 	// Create the tarball.
 	tarWriter := instancewriter.NewInstanceTarWriter(w, nil)
 
 	// Path inside the tar image is the pathname starting after cDir.
-	cDir := vm.Path()
+	cDir := d.Path()
 	offset := len(cDir) + 1
 
 	writeToTar := func(path string, fi os.FileInfo, err error) error {
@@ -3755,9 +3755,9 @@ func (vm *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMeta
 
 		// Get the instance's architecture.
 		var arch string
-		if vm.IsSnapshot() {
-			parentName, _, _ := shared.InstanceGetParentAndSnapshotName(vm.name)
-			parent, err := instance.LoadByProjectAndName(vm.state, vm.project, parentName)
+		if d.IsSnapshot() {
+			parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)
+			parent, err := instance.LoadByProjectAndName(d.state, d.project, parentName)
 			if err != nil {
 				tarWriter.Close()
 				logger.Error("Failed exporting instance", ctxMap)
@@ -3766,11 +3766,11 @@ func (vm *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMeta
 
 			arch, _ = osarch.ArchitectureName(parent.Architecture())
 		} else {
-			arch, _ = osarch.ArchitectureName(vm.architecture)
+			arch, _ = osarch.ArchitectureName(d.architecture)
 		}
 
 		if arch == "" {
-			arch, err = osarch.ArchitectureName(vm.state.OS.Architectures[0])
+			arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
 			if err != nil {
 				logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
@@ -3908,7 +3908,7 @@ func (vm *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMeta
 	}
 
 	// Include all the templates.
-	fnam = vm.TemplatesPath()
+	fnam = d.TemplatesPath()
 	if shared.PathExists(fnam) {
 		err = filepath.Walk(fnam, writeToTar)
 		if err != nil {
@@ -3928,17 +3928,17 @@ func (vm *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMeta
 }
 
 // Migrate migrates the instance to another node.
-func (vm *qemu) Migrate(args *instance.CriuMigrationArgs) error {
+func (d *qemu) Migrate(args *instance.CriuMigrationArgs) error {
 	return instance.ErrNotImplemented
 }
 
 // CGroup is not implemented for VMs.
-func (vm *qemu) CGroup() (*cgroup.CGroup, error) {
+func (d *qemu) CGroup() (*cgroup.CGroup, error) {
 	return nil, instance.ErrNotImplemented
 }
 
 // VolatileSet sets one or more volatile config keys.
-func (vm *qemu) VolatileSet(changes map[string]string) error {
+func (d *qemu) VolatileSet(changes map[string]string) error {
 	// Sanity check.
 	for key := range changes {
 		if !strings.HasPrefix(key, "volatile.") {
@@ -3948,13 +3948,13 @@ func (vm *qemu) VolatileSet(changes map[string]string) error {
 
 	// Update the database.
 	var err error
-	if vm.IsSnapshot() {
-		err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-			return tx.UpdateInstanceSnapshotConfig(vm.id, changes)
+	if d.IsSnapshot() {
+		err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.UpdateInstanceSnapshotConfig(d.id, changes)
 		})
 	} else {
-		err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-			return tx.UpdateInstanceConfig(vm.id, changes)
+		err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.UpdateInstanceConfig(d.id, changes)
 		})
 	}
 	if err != nil {
@@ -3964,33 +3964,33 @@ func (vm *qemu) VolatileSet(changes map[string]string) error {
 	// Apply the change locally.
 	for key, value := range changes {
 		if value == "" {
-			delete(vm.expandedConfig, key)
-			delete(vm.localConfig, key)
+			delete(d.expandedConfig, key)
+			delete(d.localConfig, key)
 			continue
 		}
 
-		vm.expandedConfig[key] = value
-		vm.localConfig[key] = value
+		d.expandedConfig[key] = value
+		d.localConfig[key] = value
 	}
 
 	return nil
 }
 
 // FileExists is not implemented for VMs.
-func (vm *qemu) FileExists(path string) error {
+func (d *qemu) FileExists(path string) error {
 	return instance.ErrNotImplemented
 }
 
 // FilePull retrieves a file from the instance.
-func (vm *qemu) FilePull(srcPath string, dstPath string) (int64, int64, os.FileMode, string, []string, error) {
-	client, err := vm.getAgentClient()
+func (d *qemu) FilePull(srcPath string, dstPath string) (int64, int64, os.FileMode, string, []string, error) {
+	client, err := d.getAgentClient()
 	if err != nil {
 		return 0, 0, 0, "", nil, err
 	}
 
 	agent, err := lxdClient.ConnectLXDHTTP(nil, client)
 	if err != nil {
-		logger.Errorf("Failed to connect to lxd-agent on %s: %v", vm.Name(), err)
+		logger.Errorf("Failed to connect to lxd-agent on %s: %v", d.Name(), err)
 		return 0, 0, 0, "", nil, fmt.Errorf("Failed to connect to lxd-agent")
 	}
 	defer agent.Disconnect()
@@ -4026,15 +4026,15 @@ func (vm *qemu) FilePull(srcPath string, dstPath string) (int64, int64, os.FileM
 }
 
 // FilePush pushes a file into the instance.
-func (vm *qemu) FilePush(fileType string, srcPath string, dstPath string, uid int64, gid int64, mode int, write string) error {
-	client, err := vm.getAgentClient()
+func (d *qemu) FilePush(fileType string, srcPath string, dstPath string, uid int64, gid int64, mode int, write string) error {
+	client, err := d.getAgentClient()
 	if err != nil {
 		return err
 	}
 
 	agent, err := lxdClient.ConnectLXDHTTP(nil, client)
 	if err != nil {
-		logger.Errorf("Failed to connect to lxd-agent on %s: %v", vm.Name(), err)
+		logger.Errorf("Failed to connect to lxd-agent on %s: %v", d.Name(), err)
 		return fmt.Errorf("Failed to connect to lxd-agent")
 	}
 	defer agent.Disconnect()
@@ -4073,9 +4073,9 @@ func (vm *qemu) FilePush(fileType string, srcPath string, dstPath string, uid in
 }
 
 // FileRemove removes a file from the instance.
-func (vm *qemu) FileRemove(path string) error {
+func (d *qemu) FileRemove(path string) error {
 	// Connect to the agent.
-	client, err := vm.getAgentClient()
+	client, err := d.getAgentClient()
 	if err != nil {
 		return err
 	}
@@ -4096,30 +4096,30 @@ func (vm *qemu) FileRemove(path string) error {
 }
 
 // Console gets access to the instance's console.
-func (vm *qemu) Console(protocol string) (*os.File, chan error, error) {
+func (d *qemu) Console(protocol string) (*os.File, chan error, error) {
 	switch protocol {
 	case instance.ConsoleTypeConsole:
-		return vm.console()
+		return d.console()
 	case instance.ConsoleTypeVGA:
-		return vm.vga()
+		return d.vga()
 	default:
 		return nil, nil, fmt.Errorf("Unknown protocol %q", protocol)
 	}
 }
 
-func (vm *qemu) console() (*os.File, chan error, error) {
+func (d *qemu) console() (*os.File, chan error, error) {
 	chDisconnect := make(chan error, 1)
 
 	// Avoid duplicate connects.
 	vmConsoleLock.Lock()
-	if vmConsole[vm.id] {
+	if vmConsole[d.id] {
 		vmConsoleLock.Unlock()
 		return nil, nil, fmt.Errorf("There is already an active console for this instance")
 	}
 	vmConsoleLock.Unlock()
 
 	// Connect to the monitor.
-	monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
+	monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler())
 	if err != nil {
 		return nil, nil, err // The VM isn't running as no monitor socket available.
 	}
@@ -4132,7 +4132,7 @@ func (vm *qemu) console() (*os.File, chan error, error) {
 
 	// Record the console is in use.
 	vmConsoleLock.Lock()
-	vmConsole[vm.id] = true
+	vmConsole[d.id] = true
 	vmConsoleLock.Unlock()
 
 	// Handle console disconnection.
@@ -4140,18 +4140,18 @@ func (vm *qemu) console() (*os.File, chan error, error) {
 		<-chDisconnect
 
 		vmConsoleLock.Lock()
-		delete(vmConsole, vm.id)
+		delete(vmConsole, d.id)
 		vmConsoleLock.Unlock()
 	}()
 
 	return console, chDisconnect, nil
 }
 
-func (vm *qemu) vga() (*os.File, chan error, error) {
+func (d *qemu) vga() (*os.File, chan error, error) {
 	// Open the spice socket
-	conn, err := net.Dial("unix", vm.spicePath())
+	conn, err := net.Dial("unix", d.spicePath())
 	if err != nil {
-		return nil, nil, errors.Wrapf(err, "Connect to SPICE socket %q", vm.spicePath())
+		return nil, nil, errors.Wrapf(err, "Connect to SPICE socket %q", d.spicePath())
 	}
 
 	file, err := (conn.(*net.UnixConn)).File()
@@ -4164,18 +4164,18 @@ func (vm *qemu) vga() (*os.File, chan error, error) {
 }
 
 // Exec a command inside the instance.
-func (vm *qemu) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, stderr *os.File) (instance.Cmd, error) {
+func (d *qemu) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, stderr *os.File) (instance.Cmd, error) {
 	revert := revert.New()
 	defer revert.Fail()
 
-	client, err := vm.getAgentClient()
+	client, err := d.getAgentClient()
 	if err != nil {
 		return nil, err
 	}
 
 	agent, err := lxdClient.ConnectLXDHTTP(nil, client)
 	if err != nil {
-		logger.Errorf("Failed to connect to lxd-agent on %s: %v", vm.Name(), err)
+		logger.Errorf("Failed to connect to lxd-agent on %s: %v", d.Name(), err)
 		return nil, fmt.Errorf("Failed to connect to lxd-agent")
 	}
 	revert.Add(agent.Disconnect)
@@ -4237,26 +4237,26 @@ func (vm *qemu) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File,
 }
 
 // Render returns info about the instance.
-func (vm *qemu) Render(options ...func(response interface{}) error) (interface{}, interface{}, error) {
-	if vm.IsSnapshot() {
+func (d *qemu) Render(options ...func(response interface{}) error) (interface{}, interface{}, error) {
+	if d.IsSnapshot() {
 		// Prepare the ETag
-		etag := []interface{}{vm.expiryDate}
+		etag := []interface{}{d.expiryDate}
 
 		snapState := api.InstanceSnapshot{
-			CreatedAt:       vm.creationDate,
-			ExpandedConfig:  vm.expandedConfig,
-			ExpandedDevices: vm.expandedDevices.CloneNative(),
-			LastUsedAt:      vm.lastUsedDate,
-			Name:            strings.SplitN(vm.name, "/", 2)[1],
-			Stateful:        vm.stateful,
+			CreatedAt:       d.creationDate,
+			ExpandedConfig:  d.expandedConfig,
+			ExpandedDevices: d.expandedDevices.CloneNative(),
+			LastUsedAt:      d.lastUsedDate,
+			Name:            strings.SplitN(d.name, "/", 2)[1],
+			Stateful:        d.stateful,
 			Size:            -1, // Default to uninitialised/error state (0 means no CoW usage).
 		}
-		snapState.Architecture = vm.architectureName
-		snapState.Config = vm.localConfig
-		snapState.Devices = vm.localDevices.CloneNative()
-		snapState.Ephemeral = vm.ephemeral
-		snapState.Profiles = vm.profiles
-		snapState.ExpiresAt = vm.expiryDate
+		snapState.Architecture = d.architectureName
+		snapState.Config = d.localConfig
+		snapState.Devices = d.localDevices.CloneNative()
+		snapState.Ephemeral = d.ephemeral
+		snapState.Profiles = d.profiles
+		snapState.ExpiresAt = d.expiryDate
 
 		for _, option := range options {
 			err := option(&snapState)
@@ -4269,27 +4269,27 @@ func (vm *qemu) Render(options ...func(response interface{}) error) (interface{}
 	}
 
 	// Prepare the ETag
-	etag := []interface{}{vm.architecture, vm.localConfig, vm.localDevices, vm.ephemeral, vm.profiles}
+	etag := []interface{}{d.architecture, d.localConfig, d.localDevices, d.ephemeral, d.profiles}
 
 	instState := api.Instance{
-		ExpandedConfig:  vm.expandedConfig,
-		ExpandedDevices: vm.expandedDevices.CloneNative(),
-		Name:            vm.name,
-		Status:          vm.statusCode().String(),
-		StatusCode:      vm.statusCode(),
-		Location:        vm.node,
-		Type:            vm.Type().String(),
-	}
-
-	instState.Description = vm.description
-	instState.Architecture = vm.architectureName
-	instState.Config = vm.localConfig
-	instState.CreatedAt = vm.creationDate
-	instState.Devices = vm.localDevices.CloneNative()
-	instState.Ephemeral = vm.ephemeral
-	instState.LastUsedAt = vm.lastUsedDate
-	instState.Profiles = vm.profiles
-	instState.Stateful = vm.stateful
+		ExpandedConfig:  d.expandedConfig,
+		ExpandedDevices: d.expandedDevices.CloneNative(),
+		Name:            d.name,
+		Status:          d.statusCode().String(),
+		StatusCode:      d.statusCode(),
+		Location:        d.node,
+		Type:            d.Type().String(),
+	}
+
+	instState.Description = d.description
+	instState.Architecture = d.architectureName
+	instState.Config = d.localConfig
+	instState.CreatedAt = d.creationDate
+	instState.Devices = d.localDevices.CloneNative()
+	instState.Ephemeral = d.ephemeral
+	instState.LastUsedAt = d.lastUsedDate
+	instState.Profiles = d.profiles
+	instState.Stateful = d.stateful
 
 	for _, option := range options {
 		err := option(&instState)
@@ -4302,13 +4302,13 @@ func (vm *qemu) Render(options ...func(response interface{}) error) (interface{}
 }
 
 // RenderFull returns all info about the instance.
-func (vm *qemu) RenderFull() (*api.InstanceFull, interface{}, error) {
-	if vm.IsSnapshot() {
+func (d *qemu) RenderFull() (*api.InstanceFull, interface{}, error) {
+	if d.IsSnapshot() {
 		return nil, nil, fmt.Errorf("RenderFull doesn't work with snapshots")
 	}
 
 	// Get the Instance struct.
-	base, etag, err := vm.Render()
+	base, etag, err := d.Render()
 	if err != nil {
 		return nil, nil, err
 	}
@@ -4317,13 +4317,13 @@ func (vm *qemu) RenderFull() (*api.InstanceFull, interface{}, error) {
 	vmState := api.InstanceFull{Instance: *base.(*api.Instance)}
 
 	// Add the InstanceState.
-	vmState.State, err = vm.RenderState()
+	vmState.State, err = d.RenderState()
 	if err != nil {
 		return nil, nil, err
 	}
 
 	// Add the InstanceSnapshots.
-	snaps, err := vm.Snapshots()
+	snaps, err := d.Snapshots()
 	if err != nil {
 		return nil, nil, err
 	}
@@ -4342,7 +4342,7 @@ func (vm *qemu) RenderFull() (*api.InstanceFull, interface{}, error) {
 	}
 
 	// Add the InstanceBackups.
-	backups, err := vm.Backups()
+	backups, err := d.Backups()
 	if err != nil {
 		return nil, nil, err
 	}
@@ -4361,33 +4361,33 @@ func (vm *qemu) RenderFull() (*api.InstanceFull, interface{}, error) {
 }
 
 // RenderState returns just state info about the instance.
-func (vm *qemu) RenderState() (*api.InstanceState, error) {
+func (d *qemu) RenderState() (*api.InstanceState, error) {
 	var err error
 
 	status := &api.InstanceState{}
-	statusCode := vm.statusCode()
-	pid, _ := vm.pid()
+	statusCode := d.statusCode()
+	pid, _ := d.pid()
 
 	if statusCode == api.Running {
 		// Try and get state info from agent.
-		status, err = vm.agentGetState()
+		status, err = d.agentGetState()
 		if err != nil {
 			if err != errQemuAgentOffline {
-				logger.Warn("Could not get VM state from agent", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
+				logger.Warn("Could not get VM state from agent", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 			}
 
 			// Fallback data if agent is not reachable.
 			status = &api.InstanceState{}
 			status.Processes = -1
 			networks := map[string]api.InstanceStateNetwork{}
-			for k, m := range vm.ExpandedDevices() {
+			for k, m := range d.ExpandedDevices() {
 				if m["type"] != "nic" {
 					continue
 				}
 
-				dev, _, err := vm.deviceLoad(k, m)
+				dev, _, err := d.deviceLoad(k, m)
 				if err != nil {
-					logger.Warn("Could not load device", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "device": k, "err": err})
+					logger.Warn("Could not load device", log.Ctx{"project": d.Project(), "instance": d.Name(), "device": k, "err": err})
 					continue
 				}
 
@@ -4411,7 +4411,7 @@ func (vm *qemu) RenderState() (*api.InstanceState, error) {
 		}
 
 		// Populate host_name for network devices.
-		for k, m := range vm.ExpandedDevices() {
+		for k, m := range d.ExpandedDevices() {
 			// We only care about nics.
 			if m["type"] != "nic" {
 				continue
@@ -4420,7 +4420,7 @@ func (vm *qemu) RenderState() (*api.InstanceState, error) {
 			// Get hwaddr from static or volatile config.
 			hwaddr := m["hwaddr"]
 			if hwaddr == "" {
-				hwaddr = vm.localConfig[fmt.Sprintf("volatile.%s.hwaddr", k)]
+				hwaddr = d.localConfig[fmt.Sprintf("volatile.%s.hwaddr", k)]
 			}
 
 			// We have to match on hwaddr as device name can be different from the configured device
@@ -4428,7 +4428,7 @@ func (vm *qemu) RenderState() (*api.InstanceState, error) {
 			for netName, netStatus := range status.Network {
 				if netStatus.Hwaddr == hwaddr {
 					if netStatus.HostName == "" {
-						netStatus.HostName = vm.localConfig[fmt.Sprintf("volatile.%s.host_name", k)]
+						netStatus.HostName = d.localConfig[fmt.Sprintf("volatile.%s.host_name", k)]
 						status.Network[netName] = netStatus
 					}
 				}
@@ -4439,28 +4439,28 @@ func (vm *qemu) RenderState() (*api.InstanceState, error) {
 	status.Pid = int64(pid)
 	status.Status = statusCode.String()
 	status.StatusCode = statusCode
-	status.Disk, err = vm.diskState()
+	status.Disk, err = d.diskState()
 	if err != nil && err != storageDrivers.ErrNotSupported {
-		logger.Warn("Error getting disk usage", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
+		logger.Warn("Error getting disk usage", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 	}
 
 	return status, nil
 }
 
 // diskState gets disk usage info.
-func (vm *qemu) diskState() (map[string]api.InstanceStateDisk, error) {
-	pool, err := vm.getStoragePool()
+func (d *qemu) diskState() (map[string]api.InstanceStateDisk, error) {
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return nil, err
 	}
 
 	// Get the root disk device config.
-	rootDiskName, _, err := shared.GetRootDiskDevice(vm.ExpandedDevices().CloneNative())
+	rootDiskName, _, err := shared.GetRootDiskDevice(d.ExpandedDevices().CloneNative())
 	if err != nil {
 		return nil, err
 	}
 
-	usage, err := pool.GetInstanceUsage(vm)
+	usage, err := pool.GetInstanceUsage(d)
 	if err != nil {
 		return nil, err
 	}
@@ -4472,9 +4472,9 @@ func (vm *qemu) diskState() (map[string]api.InstanceStateDisk, error) {
 
 // agentGetState connects to the agent inside of the VM and does
 // an API call to get the current state.
-func (vm *qemu) agentGetState() (*api.InstanceState, error) {
+func (d *qemu) agentGetState() (*api.InstanceState, error) {
 	// Check if the agent is running.
-	monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
+	monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler())
 	if err != nil {
 		return nil, err
 	}
@@ -4483,7 +4483,7 @@ func (vm *qemu) agentGetState() (*api.InstanceState, error) {
 		return nil, errQemuAgentOffline
 	}
 
-	client, err := vm.getAgentClient()
+	client, err := d.getAgentClient()
 	if err != nil {
 		return nil, err
 	}
@@ -4503,89 +4503,89 @@ func (vm *qemu) agentGetState() (*api.InstanceState, error) {
 }
 
 // IsRunning returns whether or not the instance is running.
-func (vm *qemu) IsRunning() bool {
-	state := vm.State()
+func (d *qemu) IsRunning() bool {
+	state := d.State()
 	return state != "STOPPED"
 }
 
 // IsFrozen returns whether the instance is frozen or not.
-func (vm *qemu) IsFrozen() bool {
-	return vm.State() == "FROZEN"
+func (d *qemu) IsFrozen() bool {
+	return d.State() == "FROZEN"
 }
 
 // IsEphemeral returns whether the instance is ephemeral or not.
-func (vm *qemu) IsEphemeral() bool {
-	return vm.ephemeral
+func (d *qemu) IsEphemeral() bool {
+	return d.ephemeral
 }
 
 // IsSnapshot returns whether the instance is a snapshot or not.
-func (vm *qemu) IsSnapshot() bool {
-	return vm.snapshot
+func (d *qemu) IsSnapshot() bool {
+	return d.snapshot
 }
 
 // IsStateful returns whether the instance is stateful or not.
-func (vm *qemu) IsStateful() bool {
-	return vm.stateful
+func (d *qemu) IsStateful() bool {
+	return d.stateful
 }
 
 // DeviceEventHandler handles events occurring on the instance's devices.
-func (vm *qemu) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
+func (d *qemu) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
 	return fmt.Errorf("DeviceEventHandler Not implemented")
 }
 
 // ID returns the instance's ID.
-func (vm *qemu) ID() int {
-	return vm.id
+func (d *qemu) ID() int {
+	return d.id
 }
 
 // vsockID returns the vsock context ID, 3 being the first ID that can be used.
-func (vm *qemu) vsockID() int {
-	return vm.id + 3
+func (d *qemu) vsockID() int {
+	return d.id + 3
 }
 
 // Location returns instance's location.
-func (vm *qemu) Location() string {
-	return vm.node
+func (d *qemu) Location() string {
+	return d.node
 }
 
 // Name returns the instance's name.
-func (vm *qemu) Name() string {
-	return vm.name
+func (d *qemu) Name() string {
+	return d.name
 }
 
 // Description returns the instance's description.
-func (vm *qemu) Description() string {
-	return vm.description
+func (d *qemu) Description() string {
+	return d.description
 }
 
 // CreationDate returns the instance's creation date.
-func (vm *qemu) CreationDate() time.Time {
-	return vm.creationDate
+func (d *qemu) CreationDate() time.Time {
+	return d.creationDate
 }
 
 // LastUsedDate returns the instance's last used date.
-func (vm *qemu) LastUsedDate() time.Time {
-	return vm.lastUsedDate
+func (d *qemu) LastUsedDate() time.Time {
+	return d.lastUsedDate
 }
 
 // Profiles returns the instance's profiles.
-func (vm *qemu) Profiles() []string {
-	return vm.profiles
+func (d *qemu) Profiles() []string {
+	return d.profiles
 }
 
 // InitPID returns the instance's current process ID.
-func (vm *qemu) InitPID() int {
-	pid, _ := vm.pid()
+func (d *qemu) InitPID() int {
+	pid, _ := d.pid()
 	return pid
 }
 
-func (vm *qemu) statusCode() api.StatusCode {
+func (d *qemu) statusCode() api.StatusCode {
 	// Connect to the monitor.
-	monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
+	monitor, err := qmp.Connect(d.monitorPath(), qemuSerialChardevName, d.getMonitorEventHandler())
 	if err != nil {
 		// If we cannot connect to the monitor but the qemu process in the pid file still exists,
 		// then qemu has likely crashed/hung and this instance is in an error state.
-		pid, _ := vm.pid()
+		pid, _ := d.pid()
 		if pid > 0 && shared.PathExists(fmt.Sprintf("/proc/%d", pid)) {
 			return api.Error
 		}
@@ -4613,14 +4613,14 @@ func (vm *qemu) statusCode() api.StatusCode {
 }
 
 // State returns the instance's state code.
-func (vm *qemu) State() string {
-	return strings.ToUpper(vm.statusCode().String())
+func (d *qemu) State() string {
+	return strings.ToUpper(d.statusCode().String())
 }
 
 // ExpiryDate returns when this snapshot expires.
-func (vm *qemu) ExpiryDate() time.Time {
-	if vm.IsSnapshot() {
-		return vm.expiryDate
+func (d *qemu) ExpiryDate() time.Time {
+	if d.IsSnapshot() {
+		return d.expiryDate
 	}
 
 	// Return zero time if the instance is not a snapshot.
@@ -4628,61 +4628,61 @@ func (vm *qemu) ExpiryDate() time.Time {
 }
 
 // Path returns the instance's path.
-func (vm *qemu) Path() string {
-	return storagePools.InstancePath(vm.Type(), vm.Project(), vm.Name(), vm.IsSnapshot())
+func (d *qemu) Path() string {
+	return storagePools.InstancePath(d.Type(), d.Project(), d.Name(), d.IsSnapshot())
 }
 
 // DevicesPath returns the instance's devices path.
-func (vm *qemu) DevicesPath() string {
-	name := project.Instance(vm.Project(), vm.Name())
+func (d *qemu) DevicesPath() string {
+	name := project.Instance(d.Project(), d.Name())
 	return shared.VarPath("devices", name)
 }
 
 // ShmountsPath returns the instance's shared mounts path.
-func (vm *qemu) ShmountsPath() string {
-	name := project.Instance(vm.Project(), vm.Name())
+func (d *qemu) ShmountsPath() string {
+	name := project.Instance(d.Project(), d.Name())
 	return shared.VarPath("shmounts", name)
 }
 
 // LogPath returns the instance's log path.
-func (vm *qemu) LogPath() string {
-	name := project.Instance(vm.Project(), vm.Name())
+func (d *qemu) LogPath() string {
+	name := project.Instance(d.Project(), d.Name())
 	return shared.LogPath(name)
 }
 
 // EarlyLogFilePath returns the instance's early log path.
-func (vm *qemu) EarlyLogFilePath() string {
-	return filepath.Join(vm.LogPath(), "qemu.early.log")
+func (d *qemu) EarlyLogFilePath() string {
+	return filepath.Join(d.LogPath(), "qemu.early.log")
 }
 
 // LogFilePath returns the instance's log path.
-func (vm *qemu) LogFilePath() string {
-	return filepath.Join(vm.LogPath(), "qemu.log")
+func (d *qemu) LogFilePath() string {
+	return filepath.Join(d.LogPath(), "qemu.log")
 }
 
 // ConsoleBufferLogPath returns the instance's console buffer log path.
-func (vm *qemu) ConsoleBufferLogPath() string {
-	return filepath.Join(vm.LogPath(), "console.log")
+func (d *qemu) ConsoleBufferLogPath() string {
+	return filepath.Join(d.LogPath(), "console.log")
 }
 
 // RootfsPath returns the instance's rootfs path.
-func (vm *qemu) RootfsPath() string {
-	return filepath.Join(vm.Path(), "rootfs")
+func (d *qemu) RootfsPath() string {
+	return filepath.Join(d.Path(), "rootfs")
 }
 
 // TemplatesPath returns the instance's templates path.
-func (vm *qemu) TemplatesPath() string {
-	return filepath.Join(vm.Path(), "templates")
+func (d *qemu) TemplatesPath() string {
+	return filepath.Join(d.Path(), "templates")
 }
 
 // StatePath returns the instance's state path.
-func (vm *qemu) StatePath() string {
-	return filepath.Join(vm.Path(), "state")
+func (d *qemu) StatePath() string {
+	return filepath.Join(d.Path(), "state")
 }
 
 // StoragePool returns the name of the instance's storage pool.
-func (vm *qemu) StoragePool() (string, error) {
-	poolName, err := vm.state.Cluster.GetInstancePool(vm.Project(), vm.Name())
+func (d *qemu) StoragePool() (string, error) {
+	poolName, err := d.state.Cluster.GetInstancePool(d.Project(), d.Name())
 	if err != nil {
 		return "", err
 	}
@@ -4691,13 +4691,13 @@ func (vm *qemu) StoragePool() (string, error) {
 }
 
 // SetOperation sets the current operation.
-func (vm *qemu) SetOperation(op *operations.Operation) {
-	vm.op = op
+func (d *qemu) SetOperation(op *operations.Operation) {
+	d.op = op
 }
 
 // DeferTemplateApply is not used currently.
-func (vm *qemu) DeferTemplateApply(trigger string) error {
-	err := vm.VolatileSet(map[string]string{"volatile.apply_template": trigger})
+func (d *qemu) DeferTemplateApply(trigger string) error {
+	err := d.VolatileSet(map[string]string{"volatile.apply_template": trigger})
 	if err != nil {
 		return errors.Wrap(err, "Failed to set apply_template volatile key")
 	}
@@ -4707,17 +4707,17 @@ func (vm *qemu) DeferTemplateApply(trigger string) error {
 
 // FillNetworkDevice takes a nic or infiniband device type and enriches it with automatically
 // generated name and hwaddr properties if these are missing from the device.
-func (vm *qemu) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
+func (d *qemu) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
 	var err error
 
 	newDevice := m.Clone()
 	updateKey := func(key string, value string) error {
-		tx, err := vm.state.Cluster.Begin()
+		tx, err := d.state.Cluster.Begin()
 		if err != nil {
 			return err
 		}
 
-		err = db.CreateInstanceConfig(tx, vm.id, map[string]string{key: value})
+		err = db.CreateInstanceConfig(tx, d.id, map[string]string{key: value})
 		if err != nil {
 			tx.Rollback()
 			return err
@@ -4731,7 +4731,7 @@ func (vm *qemu) FillNetworkDevice(name string, m deviceConfig.Device) (deviceCon
 		return nil
 	}
 
-	nicType, err := nictype.NICType(vm.state, vm.Project(), m)
+	nicType, err := nictype.NICType(d.state, d.Project(), m)
 	if err != nil {
 		return nil, err
 	}
@@ -4739,7 +4739,7 @@ func (vm *qemu) FillNetworkDevice(name string, m deviceConfig.Device) (deviceCon
 	// Fill in the MAC address
 	if !shared.StringInSlice(nicType, []string{"physical", "ipvlan", "sriov"}) && m["hwaddr"] == "" {
 		configKey := fmt.Sprintf("volatile.%s.hwaddr", name)
-		volatileHwaddr := vm.localConfig[configKey]
+		volatileHwaddr := d.localConfig[configKey]
 		if volatileHwaddr == "" {
 			// Generate a new MAC address
 			volatileHwaddr, err = instance.DeviceNextInterfaceHWAddr()
@@ -4752,18 +4752,18 @@ func (vm *qemu) FillNetworkDevice(name string, m deviceConfig.Device) (deviceCon
 				err := updateKey(configKey, volatileHwaddr)
 				if err != nil {
 					// Check if something else filled it in behind our back
-					value, err1 := vm.state.Cluster.GetInstanceConfig(vm.id, configKey)
+					value, err1 := d.state.Cluster.GetInstanceConfig(d.id, configKey)
 					if err1 != nil || value == "" {
 						return err
 					}
 
-					vm.localConfig[configKey] = value
-					vm.expandedConfig[configKey] = value
+					d.localConfig[configKey] = value
+					d.expandedConfig[configKey] = value
 					return nil
 				}
 
-				vm.localConfig[configKey] = volatileHwaddr
-				vm.expandedConfig[configKey] = volatileHwaddr
+				d.localConfig[configKey] = volatileHwaddr
+				d.expandedConfig[configKey] = volatileHwaddr
 				return nil
 			})
 			if err != nil {
@@ -4777,7 +4777,7 @@ func (vm *qemu) FillNetworkDevice(name string, m deviceConfig.Device) (deviceCon
 }
 
 // Internal MAAS handling.
-func (vm *qemu) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
+func (d *qemu) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
 	interfaces := []maas.ContainerInterface{}
 	for k, m := range devices {
 		if m["type"] != "nic" {
@@ -4788,7 +4788,7 @@ func (vm *qemu) maasInterfaces(devices map[string]map[string]string) ([]maas.Con
 			continue
 		}
 
-		m, err := vm.FillNetworkDevice(k, m)
+		m, err := d.FillNetworkDevice(k, m)
 		if err != nil {
 			return nil, err
 		}
@@ -4827,8 +4827,8 @@ func (vm *qemu) maasInterfaces(devices map[string]map[string]string) ([]maas.Con
 	return interfaces, nil
 }
 
-func (vm *qemu) maasRename(newName string) error {
-	maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
+func (d *qemu) maasRename(newName string) error {
+	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
 	if err != nil {
 		return err
 	}
@@ -4837,7 +4837,7 @@ func (vm *qemu) maasRename(newName string) error {
 		return nil
 	}
 
-	interfaces, err := vm.maasInterfaces(vm.expandedDevices.CloneNative())
+	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
 	if err != nil {
 		return err
 	}
@@ -4846,24 +4846,24 @@ func (vm *qemu) maasRename(newName string) error {
 		return nil
 	}
 
-	if vm.state.MAAS == nil {
+	if d.state.MAAS == nil {
 		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
 	}
 
-	exists, err := vm.state.MAAS.DefinedContainer(vm)
+	exists, err := d.state.MAAS.DefinedContainer(d)
 	if err != nil {
 		return err
 	}
 
 	if !exists {
-		return vm.maasUpdate(nil)
+		return d.maasUpdate(nil)
 	}
 
-	return vm.state.MAAS.RenameContainer(vm, newName)
+	return d.state.MAAS.RenameContainer(d, newName)
 }
 
-func (vm *qemu) maasDelete() error {
-	maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
+func (d *qemu) maasDelete() error {
+	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
 	if err != nil {
 		return err
 	}
@@ -4872,7 +4872,7 @@ func (vm *qemu) maasDelete() error {
 		return nil
 	}
 
-	interfaces, err := vm.maasInterfaces(vm.expandedDevices.CloneNative())
+	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
 	if err != nil {
 		return err
 	}
@@ -4881,11 +4881,11 @@ func (vm *qemu) maasDelete() error {
 		return nil
 	}
 
-	if vm.state.MAAS == nil {
+	if d.state.MAAS == nil {
 		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
 	}
 
-	exists, err := vm.state.MAAS.DefinedContainer(vm)
+	exists, err := d.state.MAAS.DefinedContainer(d)
 	if err != nil {
 		return err
 	}
@@ -4894,12 +4894,12 @@ func (vm *qemu) maasDelete() error {
 		return nil
 	}
 
-	return vm.state.MAAS.DeleteContainer(vm)
+	return d.state.MAAS.DeleteContainer(d)
 }
 
-func (vm *qemu) maasUpdate(oldDevices map[string]map[string]string) error {
+func (d *qemu) maasUpdate(oldDevices map[string]map[string]string) error {
 	// Check if MAAS is configured
-	maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
+	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
 	if err != nil {
 		return err
 	}
@@ -4909,14 +4909,14 @@ func (vm *qemu) maasUpdate(oldDevices map[string]map[string]string) error {
 	}
 
 	// Check if there's something that uses MAAS
-	interfaces, err := vm.maasInterfaces(vm.expandedDevices.CloneNative())
+	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
 	if err != nil {
 		return err
 	}
 
 	var oldInterfaces []maas.ContainerInterface
 	if oldDevices != nil {
-		oldInterfaces, err = vm.maasInterfaces(oldDevices)
+		oldInterfaces, err = d.maasInterfaces(oldDevices)
 		if err != nil {
 			return err
 		}
@@ -4927,39 +4927,39 @@ func (vm *qemu) maasUpdate(oldDevices map[string]map[string]string) error {
 	}
 
 	// See if we're connected to MAAS
-	if vm.state.MAAS == nil {
+	if d.state.MAAS == nil {
 		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
 	}
 
-	exists, err := vm.state.MAAS.DefinedContainer(vm)
+	exists, err := d.state.MAAS.DefinedContainer(d)
 	if err != nil {
 		return err
 	}
 
 	if exists {
 		if len(interfaces) == 0 && len(oldInterfaces) > 0 {
-			return vm.state.MAAS.DeleteContainer(vm)
+			return d.state.MAAS.DeleteContainer(d)
 		}
 
-		return vm.state.MAAS.UpdateContainer(vm, interfaces)
+		return d.state.MAAS.UpdateContainer(d, interfaces)
 	}
 
-	return vm.state.MAAS.CreateContainer(vm, interfaces)
+	return d.state.MAAS.CreateContainer(d, interfaces)
 }
 
 // UpdateBackupFile writes the instance's backup.yaml file to storage.
-func (vm *qemu) UpdateBackupFile() error {
-	pool, err := vm.getStoragePool()
+func (d *qemu) UpdateBackupFile() error {
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return err
 	}
 
-	return pool.UpdateInstanceBackupFile(vm, nil)
+	return pool.UpdateInstanceBackupFile(d, nil)
 }
 
 // cpuTopology takes a user cpu range and returns the number of sockets, cores and threads to configure
 // as well as a map of vcpu to threadid for pinning and a map of numa nodes to vcpus for NUMA layout.
-func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, map[uint64][]uint64, error) {
+func (d *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, map[uint64][]uint64, error) {
 	// Get CPU topology.
 	cpus, err := resources.GetCPU()
 	if err != nil {
@@ -5064,7 +5064,7 @@ func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, map
 		nrCores = countCores
 		nrThreads = countThreads
 	} else {
-		logger.Warnf("Instance '%s' uses a CPU pinning profile which doesn't match hardware layout", project.Instance(vm.Project(), vm.Name()))
+		logger.Warnf("Instance '%s' uses a CPU pinning profile which doesn't match hardware layout", project.Instance(d.Project(), d.Name()))
 
 		// Fallback on pretending everything are cores.
 		nrSockets = 1
@@ -5075,34 +5075,34 @@ func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, map
 	return nrSockets, nrCores, nrThreads, vcpus, numaNodes, nil
 }
 
-func (vm *qemu) expandConfig(profiles []api.Profile) error {
-	if profiles == nil && len(vm.profiles) > 0 {
+func (d *qemu) expandConfig(profiles []api.Profile) error {
+	if profiles == nil && len(d.profiles) > 0 {
 		var err error
-		profiles, err = vm.state.Cluster.GetProfiles(vm.project, vm.profiles)
+		profiles, err = d.state.Cluster.GetProfiles(d.project, d.profiles)
 		if err != nil {
 			return err
 		}
 	}
 
-	vm.expandedConfig = db.ExpandInstanceConfig(vm.localConfig, profiles)
+	d.expandedConfig = db.ExpandInstanceConfig(d.localConfig, profiles)
 
 	return nil
 }
 
-func (vm *qemu) devlxdEventSend(eventType string, eventMessage interface{}) error {
+func (d *qemu) devlxdEventSend(eventType string, eventMessage interface{}) error {
 	event := shared.Jmap{}
 	event["type"] = eventType
 	event["timestamp"] = time.Now()
 	event["metadata"] = eventMessage
 
-	client, err := vm.getAgentClient()
+	client, err := d.getAgentClient()
 	if err != nil {
 		return err
 	}
 
 	agent, err := lxdClient.ConnectLXDHTTP(nil, client)
 	if err != nil {
-		logger.Errorf("Failed to connect to lxd-agent on %s: %v", vm.Name(), err)
+		logger.Errorf("Failed to connect to lxd-agent on %s: %v", d.Name(), err)
 		return fmt.Errorf("Failed to connect to lxd-agent")
 	}
 	defer agent.Disconnect()
@@ -5115,17 +5115,17 @@ func (vm *qemu) devlxdEventSend(eventType string, eventMessage interface{}) erro
 	return nil
 }
 
-func (vm *qemu) writeInstanceData() error {
+func (d *qemu) writeInstanceData() error {
 	// Only write instance-data file if security.devlxd is true.
-	if !shared.IsTrue(vm.expandedConfig["security.devlxd"]) {
+	if !shared.IsTrue(d.expandedConfig["security.devlxd"]) {
 		return nil
 	}
 
 	// Instance data for devlxd.
-	configDrivePath := filepath.Join(vm.Path(), "config")
+	configDrivePath := filepath.Join(d.Path(), "config")
 	userConfig := make(map[string]string)
 
-	for k, v := range vm.ExpandedConfig() {
+	for k, v := range d.ExpandedConfig() {
 		if !strings.HasPrefix(k, "user.") {
 			continue
 		}
@@ -5136,7 +5136,7 @@ func (vm *qemu) writeInstanceData() error {
 	out, err := json.Marshal(struct {
 		Name   string            `json:"name"`
 		Config map[string]string `json:"config,omitempty"`
-	}{vm.Name(), userConfig})
+	}{d.Name(), userConfig})
 	if err != nil {
 		return err
 	}

From 8a8ff1c560f5af674392290b13acab1e0b3e1ef4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Mon, 23 Nov 2020 15:00:18 -0500
Subject: [PATCH 5/6] lxd/instance/lxc: Rename d to dev
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/instance/drivers/driver_lxc.go | 57 +++++++++++++++---------------
 1 file changed, 29 insertions(+), 28 deletions(-)

diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index e6dc02c197..69df0d39f7 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -1322,21 +1322,21 @@ func (c *lxc) runHooks(hooks []func() error) error {
 // RegisterDevices calls the Register() function on all of the instance's devices.
 func (c *lxc) RegisterDevices() {
 	devices := c.ExpandedDevices()
-	for _, dev := range devices.Sorted() {
-		d, _, err := c.deviceLoad(dev.Name, dev.Config)
+	for _, entry := range devices.Sorted() {
+		dev, _, err := c.deviceLoad(entry.Name, entry.Config)
 		if err == device.ErrUnsupportedDevType {
 			continue
 		}
 
 		if err != nil {
-			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": c.Name(), "device": dev.Name})
+			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": c.Name(), "device": entry.Name})
 			continue
 		}
 
 		// Check whether device wants to register for any events.
-		err = d.Register()
+		err = dev.Register()
 		if err != nil {
-			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": c.Name(), "device": dev.Name})
+			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": c.Name(), "device": entry.Name})
 			continue
 		}
 	}
@@ -1366,16 +1366,16 @@ func (c *lxc) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (devi
 
 // deviceAdd loads a new device and calls its Add() function.
 func (c *lxc) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
-	d, _, err := c.deviceLoad(deviceName, rawConfig)
+	dev, _, err := c.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return err
 	}
 
-	if instanceRunning && !d.CanHotPlug() {
+	if instanceRunning && !dev.CanHotPlug() {
 		return fmt.Errorf("Device cannot be added when instance is running")
 	}
 
-	return d.Add()
+	return dev.Add()
 }
 
 // deviceStart loads a new device and calls its Start() function.
@@ -1383,16 +1383,16 @@ func (c *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, inst
 	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": c.Project(), "instance": c.Name()})
 	logger.Debug("Starting device")
 
-	d, configCopy, err := c.deviceLoad(deviceName, rawConfig)
+	dev, configCopy, err := c.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return nil, err
 	}
 
-	if instanceRunning && !d.CanHotPlug() {
+	if instanceRunning && !dev.CanHotPlug() {
 		return nil, fmt.Errorf("Device cannot be started when instance is running")
 	}
 
-	runConf, err := d.Start()
+	runConf, err := dev.Start()
 	if err != nil {
 		return nil, err
 	}
@@ -1528,12 +1528,12 @@ func (c *lxc) deviceAttachNIC(configCopy map[string]string, netIF []deviceConfig
 
 // deviceUpdate loads a new device and calls its Update() function.
 func (c *lxc) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, instanceRunning bool) error {
-	d, _, err := c.deviceLoad(deviceName, rawConfig)
+	dev, _, err := c.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return err
 	}
 
-	err = d.Update(oldDevices, instanceRunning)
+	err = dev.Update(oldDevices, instanceRunning)
 	if err != nil {
 		return err
 	}
@@ -1548,7 +1548,7 @@ func (c *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, insta
 	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": c.Project(), "instance": c.Name()})
 	logger.Debug("Stopping device")
 
-	d, configCopy, err := c.deviceLoad(deviceName, rawConfig)
+	dev, configCopy, err := c.deviceLoad(deviceName, rawConfig)
 
 	// If deviceLoad fails with unsupported device type then return.
 	if err == device.ErrUnsupportedDevType {
@@ -1560,18 +1560,18 @@ func (c *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, insta
 	// versions we still need to allow previously valid devices to be stopped.
 	if err != nil {
 		// If there is no device returned, then we cannot proceed, so return as error.
-		if d == nil {
+		if dev == nil {
 			return fmt.Errorf("Device stop validation failed for %q: %v", deviceName, err)
 		}
 
 		logger.Error("Device stop validation failed for", log.Ctx{"err": err})
 	}
 
-	if instanceRunning && !d.CanHotPlug() {
+	if instanceRunning && !dev.CanHotPlug() {
 		return fmt.Errorf("Device cannot be stopped when instance is running")
 	}
 
-	runConf, err := d.Stop()
+	runConf, err := dev.Stop()
 	if err != nil {
 		return err
 	}
@@ -1730,7 +1730,7 @@ func (c *lxc) deviceHandleMounts(mounts []deviceConfig.MountEntryItem) error {
 func (c *lxc) deviceRemove(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
 	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": c.Project(), "instance": c.Name()})
 
-	d, _, err := c.deviceLoad(deviceName, rawConfig)
+	dev, _, err := c.deviceLoad(deviceName, rawConfig)
 
 	// If deviceLoad fails with unsupported device type then return.
 	if err == device.ErrUnsupportedDevType {
@@ -1742,18 +1742,18 @@ func (c *lxc) deviceRemove(deviceName string, rawConfig deviceConfig.Device, ins
 	// versions we still need to allow previously valid devices to be stopped.
 	if err != nil {
 		// If there is no device returned, then we cannot proceed, so return as error.
-		if d == nil {
+		if dev == nil {
 			return fmt.Errorf("Device remove validation failed for %q: %v", deviceName, err)
 		}
 
 		logger.Error("Device remove validation failed for", log.Ctx{"err": err})
 	}
 
-	if instanceRunning && !d.CanHotPlug() {
+	if instanceRunning && !dev.CanHotPlug() {
 		return fmt.Errorf("Device cannot be removed when instance is running")
 	}
 
-	return d.Remove()
+	return dev.Remove()
 }
 
 // deviceVolatileGetFunc returns a function that retrieves a named device's volatile config and
@@ -2097,8 +2097,8 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 	nicID := -1
 
 	// Setup devices in sorted order, this ensures that device mounts are added in path order.
-	for _, d := range c.expandedDevices.Sorted() {
-		dev := d // Ensure device variable has local scope for revert.
+	for _, entry := range c.expandedDevices.Sorted() {
+		dev := entry // Ensure device variable has local scope for revert.
 
 		// Start the device.
 		runConf, err := c.deviceStart(dev.Name, dev.Config, false)
@@ -4009,12 +4009,12 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 			return []string{} // Device types aren't the same, so this cannot be an update.
 		}
 
-		d, err := device.New(c, c.state, "", newDevice, nil, nil)
+		dev, err := device.New(c, c.state, "", newDevice, nil, nil)
 		if err != nil {
 			return []string{} // Couldn't create Device, so this cannot be an update.
 		}
 
-		return d.UpdatableFields()
+		return dev.UpdatableFields()
 	})
 
 	if userRequested {
@@ -6201,12 +6201,13 @@ func (c *lxc) InsertSeccompUnixDevice(prefix string, m deviceConfig.Device, pid
 		return err
 	}
 
-	d, err := device.UnixDeviceCreate(c.state, idmapSet, c.DevicesPath(), prefix, m, true)
+	dev, err := device.UnixDeviceCreate(c.state, idmapSet, c.DevicesPath(), prefix, m, true)
 	if err != nil {
 		return fmt.Errorf("Failed to setup device: %s", err)
 	}
-	devPath := d.HostPath
-	tgtPath := d.RelativePath
+
+	devPath := dev.HostPath
+	tgtPath := dev.RelativePath
 
 	// Bind-mount it into the container
 	defer os.Remove(devPath)
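
Context for the rename (not stated in this patch itself, but implied by the next one): patch 6/6 switches the lxc receiver from "c" to "d", so any per-device local still called "d" would shadow the receiver inside its scope. A small, hypothetical illustration of that shadowing concern, using simplified types rather than the real driver code:

package main

import "fmt"

type device struct{ name string }

type lxc struct{ devices []device }

// With the receiver named "d", the loop variable has to be called
// something else ("dev" here); naming it "d" would shadow the receiver
// and make d.devices unreachable inside the loop body.
func (d *lxc) listDevices() {
	for _, dev := range d.devices {
		fmt.Printf("%s (one of %d devices)\n", dev.name, len(d.devices))
	}
}

func main() {
	d := &lxc{devices: []device{{name: "eth0"}, {name: "root"}}}
	d.listDevices()
}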

From 288e42e58b82e08d4a6432f4f10ff1febdc28fa2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Mon, 23 Nov 2020 16:26:02 -0500
Subject: [PATCH 6/6] lxd/instance/lxc: Replace c with d
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/instance/drivers/driver_lxc.go | 2224 ++++++++++++++--------------
 1 file changed, 1112 insertions(+), 1112 deletions(-)

diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index 69df0d39f7..86ca3474ce 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -107,8 +107,8 @@ func lxcSetConfigItem(c *liblxc.Container, key string, value string) error {
 			key = "lxc.init_uid"
 		case "lxc.init.gid":
 			key = "lxc.init_gid"
 		case "lxc.idmap":
 			key = "lxc.id_map"
 		}
 	}
 
@@ -143,7 +143,7 @@ func lxcStatusCode(state liblxc.State) api.StatusCode {
 // Loader functions
 func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error) {
 	// Create the container struct
-	c := &lxc{
+	d := &lxc{
 		common: common{
 			id:           args.ID,
 			dbType:       args.Type,
@@ -168,60 +168,60 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 	revert := revert.New()
 	defer revert.Fail()
 
-	// Use c.Delete() in revert on error as this function doesn't just create DB records, it can also cause
+	// Use d.Delete() in revert on error as this function doesn't just create DB records, it can also cause
 	// other modifications to the host when devices are added.
-	revert.Add(func() { c.Delete() })
+	revert.Add(func() { d.Delete() })
 
 	// Cleanup the zero values
-	if c.expiryDate.IsZero() {
-		c.expiryDate = time.Time{}
+	if d.expiryDate.IsZero() {
+		d.expiryDate = time.Time{}
 	}
 
-	if c.creationDate.IsZero() {
-		c.creationDate = time.Time{}
+	if d.creationDate.IsZero() {
+		d.creationDate = time.Time{}
 	}
 
-	if c.lastUsedDate.IsZero() {
-		c.lastUsedDate = time.Time{}
+	if d.lastUsedDate.IsZero() {
+		d.lastUsedDate = time.Time{}
 	}
 
 	ctxMap := log.Ctx{
 		"project":   args.Project,
-		"name":      c.name,
-		"ephemeral": c.ephemeral,
+		"name":      d.name,
+		"ephemeral": d.ephemeral,
 	}
 
 	logger.Info("Creating container", ctxMap)
 
 	// Load the config.
-	err := c.init()
+	err := d.init()
 	if err != nil {
 		return nil, errors.Wrap(err, "Failed to expand config")
 	}
 
 	// Validate expanded config.
-	err = instance.ValidConfig(s.OS, c.expandedConfig, false, true)
+	err = instance.ValidConfig(s.OS, d.expandedConfig, false, true)
 	if err != nil {
 		return nil, errors.Wrap(err, "Invalid config")
 	}
 
-	err = instance.ValidDevices(s, s.Cluster, c.Project(), c.Type(), c.expandedDevices, true)
+	err = instance.ValidDevices(s, s.Cluster, d.Project(), d.Type(), d.expandedDevices, true)
 	if err != nil {
 		return nil, errors.Wrap(err, "Invalid devices")
 	}
 
 	// Retrieve the container's storage pool.
 	var storageInstance instance.Instance
-	if c.IsSnapshot() {
-		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(c.name)
+	if d.IsSnapshot() {
+		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)
 
 		// Load the parent.
-		storageInstance, err = instance.LoadByProjectAndName(c.state, c.project, parentName)
+		storageInstance, err = instance.LoadByProjectAndName(d.state, d.project, parentName)
 		if err != nil {
 			return nil, errors.Wrap(err, "Invalid parent")
 		}
 	} else {
-		storageInstance = c
+		storageInstance = d
 	}
 
 	_, rootDiskDevice, err := shared.GetRootDiskDevice(storageInstance.ExpandedDevices().CloneNative())
@@ -234,23 +234,23 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 	}
 
 	// Initialize the storage pool.
-	c.storagePool, err = storagePools.GetPoolByName(c.state, rootDiskDevice["pool"])
+	d.storagePool, err = storagePools.GetPoolByName(d.state, rootDiskDevice["pool"])
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed loading storage pool")
 	}
 
 	// Fill default config.
 	volumeConfig := map[string]string{}
-	err = c.storagePool.FillInstanceConfig(c, volumeConfig)
+	err = d.storagePool.FillInstanceConfig(d, volumeConfig)
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed filling default config")
 	}
 
 	// Create a new storage volume database entry for the container's storage volume.
-	if c.IsSnapshot() {
-		_, err = s.Cluster.CreateStorageVolumeSnapshot(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, c.storagePool.ID(), volumeConfig, time.Time{})
+	if d.IsSnapshot() {
+		_, err = s.Cluster.CreateStorageVolumeSnapshot(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, d.storagePool.ID(), volumeConfig, time.Time{})
 	} else {
-		_, err = s.Cluster.CreateStoragePoolVolume(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, c.storagePool.ID(), volumeConfig, db.StoragePoolVolumeContentTypeFS)
+		_, err = s.Cluster.CreateStoragePoolVolume(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, d.storagePool.ID(), volumeConfig, db.StoragePoolVolumeContentTypeFS)
 	}
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed creating storage record")
@@ -259,14 +259,14 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 	// Setup initial idmap config
 	var idmap *idmap.IdmapSet
 	base := int64(0)
-	if !c.IsPrivileged() {
+	if !d.IsPrivileged() {
 		idmap, base, err = findIdmap(
 			s,
 			args.Name,
-			c.expandedConfig["security.idmap.isolated"],
-			c.expandedConfig["security.idmap.base"],
-			c.expandedConfig["security.idmap.size"],
-			c.expandedConfig["raw.idmap"],
+			d.expandedConfig["security.idmap.isolated"],
+			d.expandedConfig["security.idmap.base"],
+			d.expandedConfig["security.idmap.size"],
+			d.expandedConfig["raw.idmap"],
 		)
 
 		if err != nil {
@@ -285,89 +285,89 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 		jsonIdmap = "[]"
 	}
 
-	err = c.VolatileSet(map[string]string{"volatile.idmap.next": jsonIdmap})
+	err = d.VolatileSet(map[string]string{"volatile.idmap.next": jsonIdmap})
 	if err != nil {
 		return nil, err
 	}
 
-	err = c.VolatileSet(map[string]string{"volatile.idmap.base": fmt.Sprintf("%v", base)})
+	err = d.VolatileSet(map[string]string{"volatile.idmap.base": fmt.Sprintf("%v", base)})
 	if err != nil {
 		return nil, err
 	}
 
 	// Invalid idmap cache.
-	c.idmapset = nil
+	d.idmapset = nil
 
 	// Set last_state if not currently set.
-	if c.localConfig["volatile.last_state.idmap"] == "" {
-		err = c.VolatileSet(map[string]string{"volatile.last_state.idmap": "[]"})
+	if d.localConfig["volatile.last_state.idmap"] == "" {
+		err = d.VolatileSet(map[string]string{"volatile.last_state.idmap": "[]"})
 		if err != nil {
 			return nil, err
 		}
 	}
 
 	// Re-run init to update the idmap.
-	err = c.init()
+	err = d.init()
 	if err != nil {
 		return nil, err
 	}
 
-	if !c.IsSnapshot() {
+	if !d.IsSnapshot() {
 		// Add devices to container.
-		for k, m := range c.expandedDevices {
-			err = c.deviceAdd(k, m, false)
+		for k, m := range d.expandedDevices {
+			err = d.deviceAdd(k, m, false)
 			if err != nil && err != device.ErrUnsupportedDevType {
 				return nil, errors.Wrapf(err, "Failed to add device %q", k)
 			}
 		}
 
 		// Update MAAS (must run after the MAC addresses have been generated).
-		err = c.maasUpdate(nil)
+		err = d.maasUpdate(nil)
 		if err != nil {
 			return nil, err
 		}
 	}
 
 	logger.Info("Created container", ctxMap)
-	c.state.Events.SendLifecycle(c.project, "container-created", fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+	d.state.Events.SendLifecycle(d.project, "container-created", fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
 	revert.Success()
-	return c, nil
+	return d, nil
 }
 
 func lxcLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (instance.Instance, error) {
 	// Create the container struct
-	c := lxcInstantiate(s, args, nil)
+	d := lxcInstantiate(s, args, nil)
 
 	// Setup finalizer
-	runtime.SetFinalizer(c, lxcUnload)
+	runtime.SetFinalizer(d, lxcUnload)
 
 	// Expand config and devices
-	err := c.(*lxc).expandConfig(profiles)
+	err := d.(*lxc).expandConfig(profiles)
 	if err != nil {
 		return nil, err
 	}
 
-	err = c.(*lxc).expandDevices(profiles)
+	err = d.(*lxc).expandDevices(profiles)
 	if err != nil {
 		return nil, err
 	}
 
-	return c, nil
+	return d, nil
 }
 
 // Unload is called by the garbage collector
-func lxcUnload(c *lxc) {
-	runtime.SetFinalizer(c, nil)
-	if c.c != nil {
-		c.c.Release()
-		c.c = nil
+func lxcUnload(d *lxc) {
+	runtime.SetFinalizer(d, nil)
+	if d.c != nil {
+		d.c.Release()
+		d.c = nil
 	}
 }
 
 // Create a container struct without initializing it.
 func lxcInstantiate(s *state.State, args db.InstanceArgs, expandedDevices deviceConfig.Devices) instance.Instance {
-	c := &lxc{
+	d := &lxc{
 		common: common{
 			id:           args.ID,
 			dbType:       args.Type,
@@ -390,24 +390,24 @@ func lxcInstantiate(s *state.State, args db.InstanceArgs, expandedDevices device
 	}
 
 	// Cleanup the zero values
-	if c.expiryDate.IsZero() {
-		c.expiryDate = time.Time{}
+	if d.expiryDate.IsZero() {
+		d.expiryDate = time.Time{}
 	}
 
-	if c.creationDate.IsZero() {
-		c.creationDate = time.Time{}
+	if d.creationDate.IsZero() {
+		d.creationDate = time.Time{}
 	}
 
-	if c.lastUsedDate.IsZero() {
-		c.lastUsedDate = time.Time{}
+	if d.lastUsedDate.IsZero() {
+		d.lastUsedDate = time.Time{}
 	}
 
 	// This is passed during expanded config validation.
 	if expandedDevices != nil {
-		c.expandedDevices = expandedDevices
+		d.expandedDevices = expandedDevices
 	}
 
-	return c
+	return d
 }
 
 // The LXC container driver.
@@ -447,8 +447,8 @@ type lxc struct {
 }
 
 // Type returns the instance type.
-func (c *lxc) Type() instancetype.Type {
-	return c.dbType
+func (d *lxc) Type() instancetype.Type {
+	return d.dbType
 }
 
 func idmapSize(state *state.State, isolatedStr string, size string) (int64, error) {
@@ -635,14 +635,14 @@ func findIdmap(state *state.State, cName string, isolatedStr string, configBase
 	return nil, 0, fmt.Errorf("Not enough uid/gid available for the container")
 }
 
-func (c *lxc) init() error {
+func (d *lxc) init() error {
 	// Compute the expanded config and device list
-	err := c.expandConfig(nil)
+	err := d.expandConfig(nil)
 	if err != nil {
 		return err
 	}
 
-	err = c.expandDevices(nil)
+	err = d.expandDevices(nil)
 	if err != nil {
 		return err
 	}
@@ -650,33 +650,33 @@ func (c *lxc) init() error {
 	return nil
 }
 
-func (c *lxc) initLXC(config bool) error {
+func (d *lxc) initLXC(config bool) error {
 	// No need to go through all that for snapshots
-	if c.IsSnapshot() {
+	if d.IsSnapshot() {
 		return nil
 	}
 
 	// Check if being called from a hook
-	if c.fromHook {
+	if d.fromHook {
 		return fmt.Errorf("You can't use go-lxc from inside a LXC hook")
 	}
 
 	// Check if already initialized
-	if c.c != nil {
-		if !config || c.cConfig {
+	if d.c != nil {
+		if !config || d.cConfig {
 			return nil
 		}
 	}
 
 	// Load the go-lxc struct
-	cname := project.Instance(c.Project(), c.Name())
-	cc, err := liblxc.NewContainer(cname, c.state.OS.LxcPath)
+	cname := project.Instance(d.Project(), d.Name())
+	cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath)
 	if err != nil {
 		return err
 	}
 
 	// Load cgroup abstraction
-	cg, err := c.cgroup(cc)
+	cg, err := d.cgroup(cc)
 	if err != nil {
 		return err
 	}
@@ -689,7 +689,7 @@ func (c *lxc) initLXC(config bool) error {
 	}()
 
 	// Setup logging
-	logfile := c.LogFilePath()
+	logfile := d.LogFilePath()
 	err = lxcSetConfigItem(cc, "lxc.log.file", logfile)
 	if err != nil {
 		return err
@@ -721,7 +721,7 @@ func (c *lxc) initLXC(config bool) error {
 
 		// File to dump ringbuffer contents to when requested or
 		// container shutdown.
-		consoleBufferLogFile := c.ConsoleBufferLogPath()
+		consoleBufferLogFile := d.ConsoleBufferLogPath()
 		err = lxcSetConfigItem(cc, "lxc.console.logfile", consoleBufferLogFile)
 		if err != nil {
 			return err
@@ -729,21 +729,21 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Allow for lightweight init
-	c.cConfig = config
+	d.cConfig = config
 	if !config {
-		if c.c != nil {
-			c.c.Release()
+		if d.c != nil {
+			d.c.Release()
 		}
 
-		c.c = cc
+		d.c = cc
 		freeContainer = false
 		return nil
 	}
 
-	if c.IsPrivileged() {
+	if d.IsPrivileged() {
 		// Base config
 		toDrop := "sys_time sys_module sys_rawio"
-		if !c.state.OS.AppArmorStacking || c.state.OS.AppArmorStacked {
+		if !d.state.OS.AppArmorStacking || d.state.OS.AppArmorStacked {
 			toDrop = toDrop + " mac_admin mac_override"
 		}
 
@@ -755,7 +755,7 @@ func (c *lxc) initLXC(config bool) error {
 
 	// Set an appropriate /proc, /sys/ and /sys/fs/cgroup
 	mounts := []string{}
-	if c.IsPrivileged() && !c.state.OS.RunningInUserNS {
+	if d.IsPrivileged() && !d.state.OS.RunningInUserNS {
 		mounts = append(mounts, "proc:mixed")
 		mounts = append(mounts, "sys:mixed")
 	} else {
@@ -802,7 +802,7 @@ func (c *lxc) initLXC(config bool) error {
 		"/sys/kernel/tracing",
 	}
 
-	if c.IsPrivileged() && !c.state.OS.RunningInUserNS {
+	if d.IsPrivileged() && !d.state.OS.RunningInUserNS {
 		err = lxcSetConfigItem(cc, "lxc.mount.entry", "mqueue dev/mqueue mqueue rw,relatime,create=dir,optional 0 0")
 		if err != nil {
 			return err
@@ -843,7 +843,7 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Configure devices cgroup
-	if c.IsPrivileged() && !c.state.OS.RunningInUserNS && c.state.OS.CGInfo.Supports(cgroup.Devices, cg) {
+	if d.IsPrivileged() && !d.state.OS.RunningInUserNS && d.state.OS.CGInfo.Supports(cgroup.Devices, cg) {
 		err = lxcSetConfigItem(cc, "lxc.cgroup.devices.deny", "a")
 		if err != nil {
 			return err
@@ -873,7 +873,7 @@ func (c *lxc) initLXC(config bool) error {
 		}
 	}
 
-	if c.IsNesting() {
+	if d.IsNesting() {
 		/*
 		 * mount extra /proc and /sys to work around kernel
 		 * restrictions on remounting them when covered
@@ -890,9 +890,9 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Setup architecture
-	personality, err := osarch.ArchitecturePersonality(c.architecture)
+	personality, err := osarch.ArchitecturePersonality(d.architecture)
 	if err != nil {
-		personality, err = osarch.ArchitecturePersonality(c.state.OS.Architectures[0])
+		personality, err = osarch.ArchitecturePersonality(d.state.OS.Architectures[0])
 		if err != nil {
 			return err
 		}
@@ -910,19 +910,19 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Call the onstart hook on start.
-	err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("/proc/%d/exe callhook %s %s %s start", os.Getpid(), shared.VarPath(""), strconv.Quote(c.Project()), strconv.Quote(c.Name())))
+	err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("/proc/%d/exe callhook %s %s %s start", os.Getpid(), shared.VarPath(""), strconv.Quote(d.Project()), strconv.Quote(d.Name())))
 	if err != nil {
 		return err
 	}
 
 	// Call the onstopns hook on stop but before namespaces are unmounted.
-	err = lxcSetConfigItem(cc, "lxc.hook.stop", fmt.Sprintf("%s callhook %s %s %s stopns", c.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(c.Project()), strconv.Quote(c.Name())))
+	err = lxcSetConfigItem(cc, "lxc.hook.stop", fmt.Sprintf("%s callhook %s %s %s stopns", d.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(d.Project()), strconv.Quote(d.Name())))
 	if err != nil {
 		return err
 	}
 
 	// Call the onstop hook on stop.
-	err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %s %s stop", c.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(c.Project()), strconv.Quote(c.Name())))
+	err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %s %s stop", d.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(d.Project()), strconv.Quote(d.Name())))
 	if err != nil {
 		return err
 	}
@@ -934,13 +934,13 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Setup the hostname
-	err = lxcSetConfigItem(cc, "lxc.uts.name", c.Name())
+	err = lxcSetConfigItem(cc, "lxc.uts.name", d.Name())
 	if err != nil {
 		return err
 	}
 
 	// Setup devlxd
-	if c.expandedConfig["security.devlxd"] == "" || shared.IsTrue(c.expandedConfig["security.devlxd"]) {
+	if d.expandedConfig["security.devlxd"] == "" || shared.IsTrue(d.expandedConfig["security.devlxd"]) {
 		err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/lxd none bind,create=dir 0 0", shared.VarPath("devlxd")))
 		if err != nil {
 			return err
@@ -948,8 +948,8 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Setup AppArmor
-	if c.state.OS.AppArmorAvailable {
-		if c.state.OS.AppArmorConfined || !c.state.OS.AppArmorAdmin {
+	if d.state.OS.AppArmorAvailable {
+		if d.state.OS.AppArmorConfined || !d.state.OS.AppArmorAdmin {
 			// If confined but otherwise able to use AppArmor, use our own profile
 			curProfile := util.AppArmorProfile()
 			curProfile = strings.TrimSuffix(curProfile, " (enforce)")
@@ -959,7 +959,7 @@ func (c *lxc) initLXC(config bool) error {
 			}
 		} else {
 			// If not currently confined, use the container's profile
-			profile := apparmor.InstanceProfileName(c)
+			profile := apparmor.InstanceProfileName(d)
 
 			/* In the nesting case, we want to enable the inside
 			 * LXD to load its profile. Unprivileged containers can
@@ -968,8 +968,8 @@ func (c *lxc) initLXC(config bool) error {
 			 * the old way of nesting, i.e. using the parent's
 			 * profile.
 			 */
-			if c.state.OS.AppArmorStacking && !c.state.OS.AppArmorStacked {
-				profile = fmt.Sprintf("%s//&:%s:", profile, apparmor.InstanceNamespaceName(c))
+			if d.state.OS.AppArmorStacking && !d.state.OS.AppArmorStacked {
+				profile = fmt.Sprintf("%s//&:%s:", profile, apparmor.InstanceNamespaceName(d))
 			}
 
 			err := lxcSetConfigItem(cc, "lxc.apparmor.profile", profile)
@@ -980,15 +980,15 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Setup Seccomp if necessary
-	if seccomp.InstanceNeedsPolicy(c) {
-		err = lxcSetConfigItem(cc, "lxc.seccomp.profile", seccomp.ProfilePath(c))
+	if seccomp.InstanceNeedsPolicy(d) {
+		err = lxcSetConfigItem(cc, "lxc.seccomp.profile", seccomp.ProfilePath(d))
 		if err != nil {
 			return err
 		}
 
 		// Setup notification socket
 		// System requirement errors are handled during policy generation instead of here
-		ok, err := seccomp.InstanceNeedsIntercept(c.state, c)
+		ok, err := seccomp.InstanceNeedsIntercept(d.state, d)
 		if err == nil && ok {
 			err = lxcSetConfigItem(cc, "lxc.seccomp.notify.proxy", fmt.Sprintf("unix:%s", shared.VarPath("seccomp.socket")))
 			if err != nil {
@@ -998,7 +998,7 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Setup idmap
-	idmapset, err := c.NextIdmap()
+	idmapset, err := d.NextIdmap()
 	if err != nil {
 		return err
 	}
@@ -1006,7 +1006,7 @@ func (c *lxc) initLXC(config bool) error {
 	if idmapset != nil {
 		lines := idmapset.ToLxcString()
 		for _, line := range lines {
 			err := lxcSetConfigItem(cc, "lxc.idmap", line)
 			if err != nil {
 				return err
 			}
@@ -1014,7 +1014,7 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Setup environment
-	for k, v := range c.expandedConfig {
+	for k, v := range d.expandedConfig {
 		if strings.HasPrefix(k, "environment.") {
 			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
 			if err != nil {
@@ -1024,7 +1024,7 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Setup NVIDIA runtime
-	if shared.IsTrue(c.expandedConfig["nvidia.runtime"]) {
+	if shared.IsTrue(d.expandedConfig["nvidia.runtime"]) {
 		hookDir := os.Getenv("LXD_LXC_HOOK")
 		if hookDir == "" {
 			hookDir = "/usr/share/lxc/hooks"
@@ -1045,7 +1045,7 @@ func (c *lxc) initLXC(config bool) error {
 			return err
 		}
 
-		nvidiaDriver := c.expandedConfig["nvidia.driver.capabilities"]
+		nvidiaDriver := d.expandedConfig["nvidia.driver.capabilities"]
 		if nvidiaDriver == "" {
 			err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_DRIVER_CAPABILITIES=compute,utility")
 			if err != nil {
@@ -1058,7 +1058,7 @@ func (c *lxc) initLXC(config bool) error {
 			}
 		}
 
-		nvidiaRequireCuda := c.expandedConfig["nvidia.require.cuda"]
+		nvidiaRequireCuda := d.expandedConfig["nvidia.require.cuda"]
 		if nvidiaRequireCuda == "" {
 			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_CUDA=%s", nvidiaRequireCuda))
 			if err != nil {
@@ -1066,7 +1066,7 @@ func (c *lxc) initLXC(config bool) error {
 			}
 		}
 
-		nvidiaRequireDriver := c.expandedConfig["nvidia.require.driver"]
+		nvidiaRequireDriver := d.expandedConfig["nvidia.require.driver"]
 		if nvidiaRequireDriver == "" {
 			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_DRIVER=%s", nvidiaRequireDriver))
 			if err != nil {
@@ -1081,11 +1081,11 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Memory limits
-	if c.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
-		memory := c.expandedConfig["limits.memory"]
-		memoryEnforce := c.expandedConfig["limits.memory.enforce"]
-		memorySwap := c.expandedConfig["limits.memory.swap"]
-		memorySwapPriority := c.expandedConfig["limits.memory.swap.priority"]
+	if d.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
+		memory := d.expandedConfig["limits.memory"]
+		memoryEnforce := d.expandedConfig["limits.memory.enforce"]
+		memorySwap := d.expandedConfig["limits.memory.swap"]
+		memorySwapPriority := d.expandedConfig["limits.memory.swap.priority"]
 
 		// Configure the memory limits
 		if memory != "" {
@@ -1115,7 +1115,7 @@ func (c *lxc) initLXC(config bool) error {
 					return err
 				}
 			} else {
-				if c.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) && (memorySwap == "" || shared.IsTrue(memorySwap)) {
+				if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) && (memorySwap == "" || shared.IsTrue(memorySwap)) {
 					err = cg.SetMemoryLimit(valueInt)
 					if err != nil {
 						return err
@@ -1140,7 +1140,7 @@ func (c *lxc) initLXC(config bool) error {
 			}
 		}
 
-		if c.state.OS.CGInfo.Supports(cgroup.MemorySwappiness, cg) {
+		if d.state.OS.CGInfo.Supports(cgroup.MemorySwappiness, cg) {
 			// Configure the swappiness
 			if memorySwap != "" && !shared.IsTrue(memorySwap) {
 				err = cg.SetMemorySwappiness(0)
@@ -1162,10 +1162,10 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// CPU limits
-	cpuPriority := c.expandedConfig["limits.cpu.priority"]
-	cpuAllowance := c.expandedConfig["limits.cpu.allowance"]
+	cpuPriority := d.expandedConfig["limits.cpu.priority"]
+	cpuAllowance := d.expandedConfig["limits.cpu.allowance"]
 
-	if (cpuPriority != "" || cpuAllowance != "") && c.state.OS.CGInfo.Supports(cgroup.CPU, cg) {
+	if (cpuPriority != "" || cpuAllowance != "") && d.state.OS.CGInfo.Supports(cgroup.CPU, cg) {
 		cpuShares, cpuCfsQuota, cpuCfsPeriod, err := cgroup.ParseCPU(cpuAllowance, cpuPriority)
 		if err != nil {
 			return err
@@ -1187,9 +1187,9 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Disk priority limits.
-	diskPriority := c.ExpandedConfig()["limits.disk.priority"]
+	diskPriority := d.ExpandedConfig()["limits.disk.priority"]
 	if diskPriority != "" {
-		if c.state.OS.CGInfo.Supports(cgroup.BlkioWeight, nil) {
+		if d.state.OS.CGInfo.Supports(cgroup.BlkioWeight, nil) {
 			priorityInt, err := strconv.Atoi(diskPriority)
 			if err != nil {
 				return err
@@ -1212,8 +1212,8 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Processes
-	if c.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
-		processes := c.expandedConfig["limits.processes"]
+	if d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
+		processes := d.expandedConfig["limits.processes"]
 		if processes != "" {
 			valueInt, err := strconv.ParseInt(processes, 10, 64)
 			if err != nil {
@@ -1228,9 +1228,9 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Hugepages
-	if c.state.OS.CGInfo.Supports(cgroup.Hugetlb, cg) {
+	if d.state.OS.CGInfo.Supports(cgroup.Hugetlb, cg) {
 		for i, key := range shared.HugePageSizeKeys {
-			value := c.expandedConfig[key]
+			value := d.expandedConfig[key]
 			if value != "" {
 				value, err := units.ParseByteSizeString(value)
 				if err != nil {
@@ -1246,7 +1246,7 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Setup process limits
-	for k, v := range c.expandedConfig {
+	for k, v := range d.expandedConfig {
 		if strings.HasPrefix(k, "limits.kernel.") {
 			prlimitSuffix := strings.TrimPrefix(k, "limits.kernel.")
 			prlimitKey := fmt.Sprintf("lxc.prlimit.%s", prlimitSuffix)
@@ -1258,17 +1258,17 @@ func (c *lxc) initLXC(config bool) error {
 	}
 
 	// Setup shmounts
-	if c.state.OS.LXCFeatures["mount_injection_file"] {
-		err = lxcSetConfigItem(cc, "lxc.mount.auto", fmt.Sprintf("shmounts:%s:/dev/.lxd-mounts", c.ShmountsPath()))
+	if d.state.OS.LXCFeatures["mount_injection_file"] {
+		err = lxcSetConfigItem(cc, "lxc.mount.auto", fmt.Sprintf("shmounts:%s:/dev/.lxd-mounts", d.ShmountsPath()))
 	} else {
-		err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", c.ShmountsPath()))
+		err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", d.ShmountsPath()))
 	}
 	if err != nil {
 		return err
 	}
 
 	// Apply raw.lxc
-	if lxcConfig, ok := c.expandedConfig["raw.lxc"]; ok {
+	if lxcConfig, ok := d.expandedConfig["raw.lxc"]; ok {
 		f, err := ioutil.TempFile("", "lxd_config_")
 		if err != nil {
 			return err
@@ -1286,26 +1286,26 @@ func (c *lxc) initLXC(config bool) error {
 		}
 	}
 
-	if c.c != nil {
-		c.c.Release()
+	if d.c != nil {
+		d.c.Release()
 	}
-	c.c = cc
+	d.c = cc
 	freeContainer = false
 
 	return nil
 }
 
-func (c *lxc) devlxdEventSend(eventType string, eventMessage interface{}) error {
+func (d *lxc) devlxdEventSend(eventType string, eventMessage interface{}) error {
 	event := shared.Jmap{}
 	event["type"] = eventType
 	event["timestamp"] = time.Now()
 	event["metadata"] = eventMessage
 
-	return c.state.DevlxdEvents.Send(strconv.Itoa(c.ID()), eventType, eventMessage)
+	return d.state.DevlxdEvents.Send(strconv.Itoa(d.ID()), eventType, eventMessage)
 }
 
 // runHooks executes the callback functions returned from a function.
-func (c *lxc) runHooks(hooks []func() error) error {
+func (d *lxc) runHooks(hooks []func() error) error {
 	// Run any post start hooks.
 	if len(hooks) > 0 {
 		for _, hook := range hooks {
@@ -1320,36 +1320,36 @@ func (c *lxc) runHooks(hooks []func() error) error {
 }
 
 // RegisterDevices calls the Register() function on all of the instance's devices.
-func (c *lxc) RegisterDevices() {
-	devices := c.ExpandedDevices()
+func (d *lxc) RegisterDevices() {
+	devices := d.ExpandedDevices()
 	for _, entry := range devices.Sorted() {
-		dev, _, err := c.deviceLoad(entry.Name, entry.Config)
+		dev, _, err := d.deviceLoad(entry.Name, entry.Config)
 		if err == device.ErrUnsupportedDevType {
 			continue
 		}
 
 		if err != nil {
-			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": c.Name(), "device": entry.Name})
+			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
 			continue
 		}
 
 		// Check whether device wants to register for any events.
 		err = dev.Register()
 		if err != nil {
-			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": c.Name(), "device": entry.Name})
+			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
 			continue
 		}
 	}
 }
 
 // deviceLoad instantiates and validates a new device and returns it along with enriched config.
-func (c *lxc) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
+func (d *lxc) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
 	var configCopy deviceConfig.Device
 	var err error
 
 	// Create copy of config and load some fields from volatile if device is nic or infiniband.
 	if shared.StringInSlice(rawConfig["type"], []string{"nic", "infiniband"}) {
-		configCopy, err = c.FillNetworkDevice(deviceName, rawConfig)
+		configCopy, err = d.FillNetworkDevice(deviceName, rawConfig)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -1358,15 +1358,15 @@ func (c *lxc) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (devi
 		configCopy = rawConfig.Clone()
 	}
 
-	d, err := device.New(c, c.state, deviceName, configCopy, c.deviceVolatileGetFunc(deviceName), c.deviceVolatileSetFunc(deviceName))
+	dev, err := device.New(d, d.state, deviceName, configCopy, d.deviceVolatileGetFunc(deviceName), d.deviceVolatileSetFunc(deviceName))
 
 	// Return device and config copy even if error occurs as caller may still use device.
-	return d, configCopy, err
+	return dev, configCopy, err
 }
 
 // deviceAdd loads a new device and calls its Add() function.
-func (c *lxc) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
-	dev, _, err := c.deviceLoad(deviceName, rawConfig)
+func (d *lxc) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
+	dev, _, err := d.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return err
 	}
@@ -1379,11 +1379,11 @@ func (c *lxc) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instan
 }
 
 // deviceStart loads a new device and calls its Start() function.
-func (c *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) (*deviceConfig.RunConfig, error) {
-	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": c.Project(), "instance": c.Name()})
+func (d *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) (*deviceConfig.RunConfig, error) {
+	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
 	logger.Debug("Starting device")
 
-	dev, configCopy, err := c.deviceLoad(deviceName, rawConfig)
+	dev, configCopy, err := d.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -1402,7 +1402,7 @@ func (c *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, inst
 		// Shift device file ownership if needed before mounting into container.
 		// This needs to be done whether or not container is running.
 		if len(runConf.Mounts) > 0 {
-			err := c.deviceStaticShiftMounts(runConf.Mounts)
+			err := d.deviceStaticShiftMounts(runConf.Mounts)
 			if err != nil {
 				return nil, err
 			}
@@ -1412,7 +1412,7 @@ func (c *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, inst
 		if instanceRunning {
 			// Attach mounts if requested.
 			if len(runConf.Mounts) > 0 {
-				err = c.deviceHandleMounts(runConf.Mounts)
+				err = d.deviceHandleMounts(runConf.Mounts)
 				if err != nil {
 					return nil, err
 				}
@@ -1420,7 +1420,7 @@ func (c *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, inst
 
 			// Add cgroup rules if requested.
 			if len(runConf.CGroups) > 0 {
-				err = c.deviceAddCgroupRules(runConf.CGroups)
+				err = d.deviceAddCgroupRules(runConf.CGroups)
 				if err != nil {
 					return nil, err
 				}
@@ -1428,7 +1428,7 @@ func (c *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, inst
 
 			// Attach network interface if requested.
 			if len(runConf.NetworkInterface) > 0 {
-				err = c.deviceAttachNIC(configCopy, runConf.NetworkInterface)
+				err = d.deviceAttachNIC(configCopy, runConf.NetworkInterface)
 				if err != nil {
 					return nil, err
 				}
@@ -1436,7 +1436,7 @@ func (c *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, inst
 
 			// If running, run post start hooks now (if not running LXD will run them
 			// once the instance is started).
-			err = c.runHooks(runConf.PostHooks)
+			err = d.runHooks(runConf.PostHooks)
 			if err != nil {
 				return nil, err
 			}
@@ -1447,15 +1447,15 @@ func (c *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, inst
 }
 
 // deviceStaticShiftMounts statically shift device mount files ownership to active idmap if needed.
-func (c *lxc) deviceStaticShiftMounts(mounts []deviceConfig.MountEntryItem) error {
-	idmapSet, err := c.CurrentIdmap()
+func (d *lxc) deviceStaticShiftMounts(mounts []deviceConfig.MountEntryItem) error {
+	idmapSet, err := d.CurrentIdmap()
 	if err != nil {
 		return fmt.Errorf("Failed to get idmap for device: %s", err)
 	}
 
 	// If there is an idmap being applied and LXD not running in a user namespace then shift the
 	// device files before they are mounted.
-	if idmapSet != nil && !c.state.OS.RunningInUserNS {
+	if idmapSet != nil && !d.state.OS.RunningInUserNS {
 		for _, mount := range mounts {
 			// Skip UID/GID shifting if OwnerShift mode is not static, or the host-side
 			// DevPath is empty (meaning an unmount request that doesn't need shifting).
@@ -1475,20 +1475,20 @@ func (c *lxc) deviceStaticShiftMounts(mounts []deviceConfig.MountEntryItem) erro
 }
 
 // deviceAddCgroupRules live adds cgroup rules to a container.
-func (c *lxc) deviceAddCgroupRules(cgroups []deviceConfig.RunConfigItem) error {
-	cg, err := c.cgroup(nil)
+func (d *lxc) deviceAddCgroupRules(cgroups []deviceConfig.RunConfigItem) error {
+	cg, err := d.cgroup(nil)
 	if err != nil {
 		return err
 	}
 
 	for _, rule := range cgroups {
 		// Only apply devices cgroup rules if container is running privileged and host has devices cgroup controller.
-		if strings.HasPrefix(rule.Key, "devices.") && (!c.isCurrentlyPrivileged() || c.state.OS.RunningInUserNS || !c.state.OS.CGInfo.Supports(cgroup.Devices, cg)) {
+		if strings.HasPrefix(rule.Key, "devices.") && (!d.isCurrentlyPrivileged() || d.state.OS.RunningInUserNS || !d.state.OS.CGInfo.Supports(cgroup.Devices, cg)) {
 			continue
 		}
 
 		// Add the new device cgroup rule.
-		err := c.CGroupSet(rule.Key, rule.Value)
+		err := d.CGroupSet(rule.Key, rule.Value)
 		if err != nil {
 			return fmt.Errorf("Failed to add cgroup rule for device")
 		}
@@ -1498,7 +1498,7 @@ func (c *lxc) deviceAddCgroupRules(cgroups []deviceConfig.RunConfigItem) error {
 }
 
 // deviceAttachNIC live attaches a NIC device to a container.
-func (c *lxc) deviceAttachNIC(configCopy map[string]string, netIF []deviceConfig.RunConfigItem) error {
+func (d *lxc) deviceAttachNIC(configCopy map[string]string, netIF []deviceConfig.RunConfigItem) error {
 	devName := ""
 	for _, dev := range netIF {
 		if dev.Key == "link" {
@@ -1512,13 +1512,13 @@ func (c *lxc) deviceAttachNIC(configCopy map[string]string, netIF []deviceConfig
 	}
 
 	// Load the go-lxc struct.
-	err := c.initLXC(false)
+	err := d.initLXC(false)
 	if err != nil {
 		return err
 	}
 
 	// Add the interface to the container.
-	err = c.c.AttachInterface(devName, configCopy["name"])
+	err = d.c.AttachInterface(devName, configCopy["name"])
 	if err != nil {
 		return fmt.Errorf("Failed to attach interface: %s to %s: %s", devName, configCopy["name"], err)
 	}
@@ -1527,8 +1527,8 @@ func (c *lxc) deviceAttachNIC(configCopy map[string]string, netIF []deviceConfig
 }
 
 // deviceUpdate loads a new device and calls its Update() function.
-func (c *lxc) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, instanceRunning bool) error {
-	dev, _, err := c.deviceLoad(deviceName, rawConfig)
+func (d *lxc) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, instanceRunning bool) error {
+	dev, _, err := d.deviceLoad(deviceName, rawConfig)
 	if err != nil {
 		return err
 	}
@@ -1544,11 +1544,11 @@ func (c *lxc) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, old
 // deviceStop loads a new device and calls its Stop() function.
 // Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
 // container's network namespace is unmounted (which is required for NIC device cleanup).
-func (c *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool, stopHookNetnsPath string) error {
-	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": c.Project(), "instance": c.Name()})
+func (d *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool, stopHookNetnsPath string) error {
+	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
 	logger.Debug("Stopping device")
 
-	dev, configCopy, err := c.deviceLoad(deviceName, rawConfig)
+	dev, configCopy, err := d.deviceLoad(deviceName, rawConfig)
 
 	// If deviceLoad fails with unsupported device type then return.
 	if err == device.ErrUnsupportedDevType {
@@ -1579,7 +1579,7 @@ func (c *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, insta
 	if runConf != nil {
 		// If network interface settings returned, then detach NIC from container.
 		if len(runConf.NetworkInterface) > 0 {
-			err = c.deviceDetachNIC(configCopy, runConf.NetworkInterface, instanceRunning, stopHookNetnsPath)
+			err = d.deviceDetachNIC(configCopy, runConf.NetworkInterface, instanceRunning, stopHookNetnsPath)
 			if err != nil {
 				return err
 			}
@@ -1587,7 +1587,7 @@ func (c *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, insta
 
 		// Add cgroup rules if requested and container is running.
 		if len(runConf.CGroups) > 0 && instanceRunning {
-			err = c.deviceAddCgroupRules(runConf.CGroups)
+			err = d.deviceAddCgroupRules(runConf.CGroups)
 			if err != nil {
 				return err
 			}
@@ -1595,14 +1595,14 @@ func (c *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, insta
 
 		// Detach mounts if requested and container is running.
 		if len(runConf.Mounts) > 0 && instanceRunning {
-			err = c.deviceHandleMounts(runConf.Mounts)
+			err = d.deviceHandleMounts(runConf.Mounts)
 			if err != nil {
 				return err
 			}
 		}
 
 		// Run post stop hooks irrespective of run state of instance.
-		err = c.runHooks(runConf.PostHooks)
+		err = d.runHooks(runConf.PostHooks)
 		if err != nil {
 			return err
 		}
@@ -1614,7 +1614,7 @@ func (c *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, insta
 // deviceDetachNIC detaches a NIC device from a container.
 // Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
 // container's network namespace is unmounted (which is required for NIC device cleanup).
-func (c *lxc) deviceDetachNIC(configCopy map[string]string, netIF []deviceConfig.RunConfigItem, instanceRunning bool, stopHookNetnsPath string) error {
+func (d *lxc) deviceDetachNIC(configCopy map[string]string, netIF []deviceConfig.RunConfigItem, instanceRunning bool, stopHookNetnsPath string) error {
 	// Get requested device name to detach interface back to on the host.
 	devName := ""
 	for _, dev := range netIF {
@@ -1631,8 +1631,8 @@ func (c *lxc) deviceDetachNIC(configCopy map[string]string, netIF []deviceConfig
 	// If container is running, perform live detach of interface back to host.
 	if instanceRunning {
 		// For some reason, having network config confuses detach, so get our own go-lxc struct.
-		cname := project.Instance(c.Project(), c.Name())
-		cc, err := liblxc.NewContainer(cname, c.state.OS.LxcPath)
+		cname := project.Instance(d.Project(), d.Name())
+		cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath)
 		if err != nil {
 			return err
 		}
@@ -1667,7 +1667,7 @@ func (c *lxc) deviceDetachNIC(configCopy map[string]string, netIF []deviceConfig
 				return fmt.Errorf("Cannot detach NIC device %q without stopHookNetnsPath being provided", devName)
 			}
 
-			err := c.detachInterfaceRename(stopHookNetnsPath, configCopy["name"], devName)
+			err := d.detachInterfaceRename(stopHookNetnsPath, configCopy["name"], devName)
 			if err != nil {
 				return errors.Wrapf(err, "Failed to detach interface: %q to %q", configCopy["name"], devName)
 			}
@@ -1680,7 +1680,7 @@ func (c *lxc) deviceDetachNIC(configCopy map[string]string, netIF []deviceConfig
 
 // deviceHandleMounts live attaches or detaches mounts on a container.
 // If the mount DevPath is empty the mount action is treated as unmount.
-func (c *lxc) deviceHandleMounts(mounts []deviceConfig.MountEntryItem) error {
+func (d *lxc) deviceHandleMounts(mounts []deviceConfig.MountEntryItem) error {
 	for _, mount := range mounts {
 		if mount.DevPath != "" {
 			flags := 0
@@ -1700,19 +1700,19 @@ func (c *lxc) deviceHandleMounts(mounts []deviceConfig.MountEntryItem) error {
 			}
 
 			// Mount it into the container.
-			err := c.insertMount(mount.DevPath, mount.TargetPath, mount.FSType, flags, shiftfs)
+			err := d.insertMount(mount.DevPath, mount.TargetPath, mount.FSType, flags, shiftfs)
 			if err != nil {
 				return fmt.Errorf("Failed to add mount for device inside container: %s", err)
 			}
 		} else {
 			relativeTargetPath := strings.TrimPrefix(mount.TargetPath, "/")
-			if c.FileExists(relativeTargetPath) == nil {
-				err := c.removeMount(mount.TargetPath)
+			if d.FileExists(relativeTargetPath) == nil {
+				err := d.removeMount(mount.TargetPath)
 				if err != nil {
 					return fmt.Errorf("Error unmounting the device path inside container: %s", err)
 				}
 
-				err = c.FileRemove(relativeTargetPath)
+				err = d.FileRemove(relativeTargetPath)
 				if err != nil {
 					// Only warn here and don't fail as removing a directory
 					// mount may fail if there was already files inside
@@ -1727,10 +1727,10 @@ func (c *lxc) deviceHandleMounts(mounts []deviceConfig.MountEntryItem) error {
 }
 
 // deviceRemove loads a new device and calls its Remove() function.
-func (c *lxc) deviceRemove(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
-	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": c.Project(), "instance": c.Name()})
+func (d *lxc) deviceRemove(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
+	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
 
-	dev, _, err := c.deviceLoad(deviceName, rawConfig)
+	dev, _, err := d.deviceLoad(deviceName, rawConfig)
 
 	// If deviceLoad fails with unsupported device type then return.
 	if err == device.ErrUnsupportedDevType {
@@ -1758,11 +1758,11 @@ func (c *lxc) deviceRemove(deviceName string, rawConfig deviceConfig.Device, ins
 
 // deviceVolatileGetFunc returns a function that retrieves a named device's volatile config and
 // removes its device prefix from the keys.
-func (c *lxc) deviceVolatileGetFunc(devName string) func() map[string]string {
+func (d *lxc) deviceVolatileGetFunc(devName string) func() map[string]string {
 	return func() map[string]string {
 		volatile := make(map[string]string)
 		prefix := fmt.Sprintf("volatile.%s.", devName)
-		for k, v := range c.localConfig {
+		for k, v := range d.localConfig {
 			if strings.HasPrefix(k, prefix) {
 				volatile[strings.TrimPrefix(k, prefix)] = v
 			}
@@ -1773,29 +1773,29 @@ func (c *lxc) deviceVolatileGetFunc(devName string) func() map[string]string {
 
 // deviceVolatileSetFunc returns a function that can be called to save a named device's volatile
 // config using keys that do not have the device's name prefixed.
-func (c *lxc) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
+func (d *lxc) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
 	return func(save map[string]string) error {
 		volatileSave := make(map[string]string)
 		for k, v := range save {
 			volatileSave[fmt.Sprintf("volatile.%s.%s", devName, k)] = v
 		}
 
-		return c.VolatileSet(volatileSave)
+		return d.VolatileSet(volatileSave)
 	}
 }
 
 // deviceResetVolatile resets a device's volatile data when its removed or updated in such a way
 // that it is removed then added immediately afterwards.
-func (c *lxc) deviceResetVolatile(devName string, oldConfig, newConfig deviceConfig.Device) error {
+func (d *lxc) deviceResetVolatile(devName string, oldConfig, newConfig deviceConfig.Device) error {
 	volatileClear := make(map[string]string)
 	devicePrefix := fmt.Sprintf("volatile.%s.", devName)
 
-	newNICType, err := nictype.NICType(c.state, c.Project(), newConfig)
+	newNICType, err := nictype.NICType(d.state, d.Project(), newConfig)
 	if err != nil {
 		return err
 	}
 
-	oldNICType, err := nictype.NICType(c.state, c.Project(), oldConfig)
+	oldNICType, err := nictype.NICType(d.state, d.Project(), oldConfig)
 	if err != nil {
 		return err
 	}
@@ -1804,7 +1804,7 @@ func (c *lxc) deviceResetVolatile(devName string, oldConfig, newConfig deviceCon
 	// This will occur if the newConfig is empty (i.e the device is actually being removed) or
 	// if the device type is being changed but keeping the same name.
 	if newConfig["type"] != oldConfig["type"] || newNICType != oldNICType {
-		for k := range c.localConfig {
+		for k := range d.localConfig {
 			if !strings.HasPrefix(k, devicePrefix) {
 				continue
 			}
@@ -1812,13 +1812,13 @@ func (c *lxc) deviceResetVolatile(devName string, oldConfig, newConfig deviceCon
 			volatileClear[k] = ""
 		}
 
-		return c.VolatileSet(volatileClear)
+		return d.VolatileSet(volatileClear)
 	}
 
 	// If the device type remains the same, then just remove any volatile keys that have
 	// the same key name present in the new config (i.e the new config is replacing the
 	// old volatile key).
-	for k := range c.localConfig {
+	for k := range d.localConfig {
 		if !strings.HasPrefix(k, devicePrefix) {
 			continue
 		}
@@ -1829,13 +1829,13 @@ func (c *lxc) deviceResetVolatile(devName string, oldConfig, newConfig deviceCon
 		}
 	}
 
-	return c.VolatileSet(volatileClear)
+	return d.VolatileSet(volatileClear)
 }
 
 // DeviceEventHandler actions the results of a RunConfig after an event has occurred on a device.
-func (c *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
+func (d *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
 	// Device events can only be processed when the container is running.
-	if !c.IsRunning() {
+	if !d.IsRunning() {
 		return nil
 	}
 
@@ -1845,12 +1845,12 @@ func (c *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
 
 	// Shift device file ownership if needed before mounting devices into container.
 	if len(runConf.Mounts) > 0 {
-		err := c.deviceStaticShiftMounts(runConf.Mounts)
+		err := d.deviceStaticShiftMounts(runConf.Mounts)
 		if err != nil {
 			return err
 		}
 
-		err = c.deviceHandleMounts(runConf.Mounts)
+		err = d.deviceHandleMounts(runConf.Mounts)
 		if err != nil {
 			return err
 		}
@@ -1858,14 +1858,14 @@ func (c *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
 
 	// Add cgroup rules if requested.
 	if len(runConf.CGroups) > 0 {
-		err := c.deviceAddCgroupRules(runConf.CGroups)
+		err := d.deviceAddCgroupRules(runConf.CGroups)
 		if err != nil {
 			return err
 		}
 	}
 
 	// Run any post hooks requested.
-	err := c.runHooks(runConf.PostHooks)
+	err := d.runHooks(runConf.PostHooks)
 	if err != nil {
 		return err
 	}
@@ -1873,7 +1873,7 @@ func (c *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
 	// Generate uevent inside container if requested.
 	if len(runConf.Uevents) > 0 {
 
-		pidFdNr, pidFd := c.inheritInitPidFd()
+		pidFdNr, pidFd := d.inheritInitPidFd()
 		if pidFdNr >= 0 {
 			defer pidFd.Close()
 		}
@@ -1883,7 +1883,7 @@ func (c *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
 			ueventArray[0] = "forkuevent"
 			ueventArray[2] = "inject"
 			ueventArray[1] = "--"
-			ueventArray[3] = fmt.Sprintf("%d", c.InitPID())
+			ueventArray[3] = fmt.Sprintf("%d", d.InitPID())
 			ueventArray[4] = fmt.Sprintf("%d", pidFdNr)
 			length := 0
 			for _, part := range eventParts {
@@ -1891,7 +1891,7 @@ func (c *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
 			}
 			ueventArray[5] = fmt.Sprintf("%d", length)
 			ueventArray = append(ueventArray, eventParts...)
-			_, _, err := shared.RunCommandSplit(nil, []*os.File{pidFd}, c.state.OS.ExecPath, ueventArray...)
+			_, _, err := shared.RunCommandSplit(nil, []*os.File{pidFd}, d.state.OS.ExecPath, ueventArray...)
 			if err != nil {
 				return err
 			}
@@ -1902,52 +1902,52 @@ func (c *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
 }
 
 // Config handling
-func (c *lxc) expandConfig(profiles []api.Profile) error {
-	if profiles == nil && len(c.profiles) > 0 {
+func (d *lxc) expandConfig(profiles []api.Profile) error {
+	if profiles == nil && len(d.profiles) > 0 {
 		var err error
-		profiles, err = c.state.Cluster.GetProfiles(c.project, c.profiles)
+		profiles, err = d.state.Cluster.GetProfiles(d.project, d.profiles)
 		if err != nil {
 			return err
 		}
 	}
 
-	c.expandedConfig = db.ExpandInstanceConfig(c.localConfig, profiles)
+	d.expandedConfig = db.ExpandInstanceConfig(d.localConfig, profiles)
 
 	return nil
 }
 
-func (c *lxc) expandDevices(profiles []api.Profile) error {
-	if profiles == nil && len(c.profiles) > 0 {
+func (d *lxc) expandDevices(profiles []api.Profile) error {
+	if profiles == nil && len(d.profiles) > 0 {
 		var err error
-		profiles, err = c.state.Cluster.GetProfiles(c.project, c.profiles)
+		profiles, err = d.state.Cluster.GetProfiles(d.project, d.profiles)
 		if err != nil {
 			return err
 		}
 	}
 
-	c.expandedDevices = db.ExpandInstanceDevices(c.localDevices, profiles)
+	d.expandedDevices = db.ExpandInstanceDevices(d.localDevices, profiles)
 
 	return nil
 }
 
 // Start functions
-func (c *lxc) startCommon() (string, []func() error, error) {
+func (d *lxc) startCommon() (string, []func() error, error) {
 	revert := revert.New()
 	defer revert.Fail()
 
 	// Load the go-lxc struct
-	err := c.initLXC(true)
+	err := d.initLXC(true)
 	if err != nil {
 		return "", nil, errors.Wrap(err, "Load go-lxc struct")
 	}
 
 	// Check that we're not already running
-	if c.IsRunning() {
+	if d.IsRunning() {
 		return "", nil, fmt.Errorf("The container is already running")
 	}
 
 	// Load any required kernel modules
-	kernelModules := c.expandedConfig["linux.kernel_modules"]
+	kernelModules := d.expandedConfig["linux.kernel_modules"]
 	if kernelModules != "" {
 		for _, module := range strings.Split(kernelModules, ",") {
 			module = strings.TrimPrefix(module, " ")
@@ -1959,7 +1959,7 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 	}
 
 	// Rotate the log file.
-	logfile := c.LogFilePath()
+	logfile := d.LogFilePath()
 	if shared.PathExists(logfile) {
 		os.Remove(logfile + ".old")
 		err := os.Rename(logfile, logfile+".old")
@@ -1969,57 +1969,57 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 	}
 
 	// Mount instance root volume.
-	_, err = c.mount()
+	_, err = d.mount()
 	if err != nil {
 		return "", nil, err
 	}
-	revert.Add(func() { c.unmount() })
+	revert.Add(func() { d.unmount() })
 
 	/* Deal with idmap changes */
-	nextIdmap, err := c.NextIdmap()
+	nextIdmap, err := d.NextIdmap()
 	if err != nil {
 		return "", nil, errors.Wrap(err, "Set ID map")
 	}
 
-	diskIdmap, err := c.DiskIdmap()
+	diskIdmap, err := d.DiskIdmap()
 	if err != nil {
 		return "", nil, errors.Wrap(err, "Set last ID map")
 	}
 
 	// Once mounted, check if filesystem needs shifting.
-	if !nextIdmap.Equals(diskIdmap) && !(diskIdmap == nil && c.state.OS.Shiftfs) {
-		if shared.IsTrue(c.expandedConfig["security.protection.shift"]) {
+	if !nextIdmap.Equals(diskIdmap) && !(diskIdmap == nil && d.state.OS.Shiftfs) {
+		if shared.IsTrue(d.expandedConfig["security.protection.shift"]) {
 			return "", nil, fmt.Errorf("Container is protected against filesystem shifting")
 		}
 
 		logger.Debugf("Container idmap changed, remapping")
-		c.updateProgress("Remapping container filesystem")
+		d.updateProgress("Remapping container filesystem")
 
-		storageType, err := c.getStorageType()
+		storageType, err := d.getStorageType()
 		if err != nil {
 			return "", nil, errors.Wrap(err, "Storage type")
 		}
 
 		if diskIdmap != nil {
 			if storageType == "zfs" {
-				err = diskIdmap.UnshiftRootfs(c.RootfsPath(), storageDrivers.ShiftZFSSkipper)
+				err = diskIdmap.UnshiftRootfs(d.RootfsPath(), storageDrivers.ShiftZFSSkipper)
 			} else if storageType == "btrfs" {
-				err = storageDrivers.UnshiftBtrfsRootfs(c.RootfsPath(), diskIdmap)
+				err = storageDrivers.UnshiftBtrfsRootfs(d.RootfsPath(), diskIdmap)
 			} else {
-				err = diskIdmap.UnshiftRootfs(c.RootfsPath(), nil)
+				err = diskIdmap.UnshiftRootfs(d.RootfsPath(), nil)
 			}
 			if err != nil {
 				return "", nil, err
 			}
 		}
 
-		if nextIdmap != nil && !c.state.OS.Shiftfs {
+		if nextIdmap != nil && !d.state.OS.Shiftfs {
 			if storageType == "zfs" {
-				err = nextIdmap.ShiftRootfs(c.RootfsPath(), storageDrivers.ShiftZFSSkipper)
+				err = nextIdmap.ShiftRootfs(d.RootfsPath(), storageDrivers.ShiftZFSSkipper)
 			} else if storageType == "btrfs" {
-				err = storageDrivers.ShiftBtrfsRootfs(c.RootfsPath(), nextIdmap)
+				err = storageDrivers.ShiftBtrfsRootfs(d.RootfsPath(), nextIdmap)
 			} else {
-				err = nextIdmap.ShiftRootfs(c.RootfsPath(), nil)
+				err = nextIdmap.ShiftRootfs(d.RootfsPath(), nil)
 			}
 			if err != nil {
 				return "", nil, err
@@ -2027,7 +2027,7 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 		}
 
 		jsonDiskIdmap := "[]"
-		if nextIdmap != nil && !c.state.OS.Shiftfs {
+		if nextIdmap != nil && !d.state.OS.Shiftfs {
 			idmapBytes, err := json.Marshal(nextIdmap.Idmap)
 			if err != nil {
 				return "", nil, err
@@ -2035,12 +2035,12 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 			jsonDiskIdmap = string(idmapBytes)
 		}
 
-		err = c.VolatileSet(map[string]string{"volatile.last_state.idmap": jsonDiskIdmap})
+		err = d.VolatileSet(map[string]string{"volatile.last_state.idmap": jsonDiskIdmap})
 		if err != nil {
-			return "", nil, errors.Wrapf(err, "Set volatile.last_state.idmap config key on container %q (id %d)", c.name, c.id)
+			return "", nil, errors.Wrapf(err, "Set volatile.last_state.idmap config key on container %q (id %d)", d.name, d.id)
 		}
 
-		c.updateProgress("")
+		d.updateProgress("")
 	}
 
 	var idmapBytes []byte
@@ -2053,43 +2053,43 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 		}
 	}
 
-	if c.localConfig["volatile.idmap.current"] != string(idmapBytes) {
-		err = c.VolatileSet(map[string]string{"volatile.idmap.current": string(idmapBytes)})
+	if d.localConfig["volatile.idmap.current"] != string(idmapBytes) {
+		err = d.VolatileSet(map[string]string{"volatile.idmap.current": string(idmapBytes)})
 		if err != nil {
-			return "", nil, errors.Wrapf(err, "Set volatile.idmap.current config key on container %q (id %d)", c.name, c.id)
+			return "", nil, errors.Wrapf(err, "Set volatile.idmap.current config key on container %q (id %d)", d.name, d.id)
 		}
 	}
 
 	// Generate the Seccomp profile
-	if err := seccomp.CreateProfile(c.state, c); err != nil {
+	if err := seccomp.CreateProfile(d.state, d); err != nil {
 		return "", nil, err
 	}
 
 	// Cleanup any existing leftover devices
-	c.removeUnixDevices()
-	c.removeDiskDevices()
+	d.removeUnixDevices()
+	d.removeDiskDevices()
 
 	// Create any missing directories.
-	err = os.MkdirAll(c.LogPath(), 0700)
+	err = os.MkdirAll(d.LogPath(), 0700)
 	if err != nil {
 		return "", nil, err
 	}
 
-	err = os.MkdirAll(c.DevicesPath(), 0711)
+	err = os.MkdirAll(d.DevicesPath(), 0711)
 	if err != nil {
 		return "", nil, err
 	}
 
-	err = os.MkdirAll(c.ShmountsPath(), 0711)
+	err = os.MkdirAll(d.ShmountsPath(), 0711)
 	if err != nil {
 		return "", nil, err
 	}
 
 	// Generate UUID if not present.
-	instUUID := c.localConfig["volatile.uuid"]
+	instUUID := d.localConfig["volatile.uuid"]
 	if instUUID == "" {
 		instUUID = uuid.New()
-		c.VolatileSet(map[string]string{"volatile.uuid": instUUID})
+		d.VolatileSet(map[string]string{"volatile.uuid": instUUID})
 	}
 
 	// Create the devices
@@ -2097,18 +2097,18 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 	nicID := -1
 
 	// Setup devices in sorted order, this ensures that device mounts are added in path order.
-	for _, entry := range c.expandedDevices.Sorted() {
+	for _, entry := range d.expandedDevices.Sorted() {
 		dev := entry // Ensure device variable has local scope for revert.
 
 		// Start the device.
-		runConf, err := c.deviceStart(dev.Name, dev.Config, false)
+		runConf, err := d.deviceStart(dev.Name, dev.Config, false)
 		if err != nil {
 			return "", nil, errors.Wrapf(err, "Failed to start device %q", dev.Name)
 		}
 
 		// Stop device on failure to setup container.
 		revert.Add(func() {
-			err := c.deviceStop(dev.Name, dev.Config, false, "")
+			err := d.deviceStop(dev.Name, dev.Config, false, "")
 			if err != nil {
 				logger.Errorf("Failed to cleanup device %q: %v", dev.Name, err)
 			}
@@ -2121,20 +2121,20 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 		if runConf.RootFS.Path != "" {
 			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
 				// Set the rootfs backend type if supported (must happen before any other lxc.rootfs)
-				err := lxcSetConfigItem(c.c, "lxc.rootfs.backend", "dir")
+				err := lxcSetConfigItem(d.c, "lxc.rootfs.backend", "dir")
 				if err == nil {
-					value := c.c.ConfigItem("lxc.rootfs.backend")
+					value := d.c.ConfigItem("lxc.rootfs.backend")
 					if len(value) == 0 || value[0] != "dir" {
-						lxcSetConfigItem(c.c, "lxc.rootfs.backend", "")
+						lxcSetConfigItem(d.c, "lxc.rootfs.backend", "")
 					}
 				}
 			}
 
 			if util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
 				rootfsPath := fmt.Sprintf("dir:%s", runConf.RootFS.Path)
-				err = lxcSetConfigItem(c.c, "lxc.rootfs.path", rootfsPath)
+				err = lxcSetConfigItem(d.c, "lxc.rootfs.path", rootfsPath)
 			} else {
-				err = lxcSetConfigItem(c.c, "lxc.rootfs", runConf.RootFS.Path)
+				err = lxcSetConfigItem(d.c, "lxc.rootfs", runConf.RootFS.Path)
 			}
 
 			if err != nil {
@@ -2142,27 +2142,27 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 			}
 
 			if len(runConf.RootFS.Opts) > 0 {
-				err = lxcSetConfigItem(c.c, "lxc.rootfs.options", strings.Join(runConf.RootFS.Opts, ","))
+				err = lxcSetConfigItem(d.c, "lxc.rootfs.options", strings.Join(runConf.RootFS.Opts, ","))
 				if err != nil {
 					return "", nil, errors.Wrapf(err, "Failed to setup device rootfs '%s'", dev.Name)
 				}
 			}
 
-			if c.state.OS.Shiftfs && !c.IsPrivileged() && diskIdmap == nil {
+			if d.state.OS.Shiftfs && !d.IsPrivileged() && diskIdmap == nil {
 				// Host side mark mount.
-				err = lxcSetConfigItem(c.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(c.RootfsPath()), strconv.Quote(c.RootfsPath())))
+				err = lxcSetConfigItem(d.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath())))
 				if err != nil {
 					return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
 				}
 
 				// Container side shift mount.
-				err = lxcSetConfigItem(c.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(c.RootfsPath()), strconv.Quote(c.RootfsPath())))
+				err = lxcSetConfigItem(d.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath())))
 				if err != nil {
 					return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
 				}
 
 				// Host side umount of mark mount.
-				err = lxcSetConfigItem(c.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(c.RootfsPath())))
+				err = lxcSetConfigItem(d.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(d.RootfsPath())))
 				if err != nil {
 					return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
 				}
@@ -2172,7 +2172,7 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 		// Pass any cgroups rules into LXC.
 		if len(runConf.CGroups) > 0 {
 			for _, rule := range runConf.CGroups {
-				err = lxcSetConfigItem(c.c, fmt.Sprintf("lxc.cgroup.%s", rule.Key), rule.Value)
+				err = lxcSetConfigItem(d.c, fmt.Sprintf("lxc.cgroup.%s", rule.Key), rule.Value)
 				if err != nil {
 					return "", nil, errors.Wrapf(err, "Failed to setup device cgroup '%s'", dev.Name)
 				}
@@ -2186,29 +2186,29 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 					return "", nil, errors.Wrapf(fmt.Errorf("liblxc 3.0 is required for mount propagation configuration"), "Failed to setup device mount '%s'", dev.Name)
 				}
 
-				if mount.OwnerShift == deviceConfig.MountOwnerShiftDynamic && !c.IsPrivileged() {
-					if !c.state.OS.Shiftfs {
+				if mount.OwnerShift == deviceConfig.MountOwnerShiftDynamic && !d.IsPrivileged() {
+					if !d.state.OS.Shiftfs {
 						return "", nil, errors.Wrapf(fmt.Errorf("shiftfs is required but isn't supported on system"), "Failed to setup device mount '%s'", dev.Name)
 					}
 
-					err = lxcSetConfigItem(c.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath)))
+					err = lxcSetConfigItem(d.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath)))
 					if err != nil {
 						return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
 					}
 
-					err = lxcSetConfigItem(c.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath)))
+					err = lxcSetConfigItem(d.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath)))
 					if err != nil {
 						return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
 					}
 
-					err = lxcSetConfigItem(c.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(mount.DevPath)))
+					err = lxcSetConfigItem(d.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(mount.DevPath)))
 					if err != nil {
 						return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
 					}
 				}
 
 				mntVal := fmt.Sprintf("%s %s %s %s %d %d", shared.EscapePathFstab(mount.DevPath), shared.EscapePathFstab(mount.TargetPath), mount.FSType, strings.Join(mount.Opts, ","), mount.Freq, mount.PassNo)
-				err = lxcSetConfigItem(c.c, "lxc.mount.entry", mntVal)
+				err = lxcSetConfigItem(d.c, "lxc.mount.entry", mntVal)
 				if err != nil {
 					return "", nil, errors.Wrapf(err, "Failed to setup device mount '%s'", dev.Name)
 				}
@@ -2226,7 +2226,7 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 			}
 
 			for _, nicItem := range runConf.NetworkInterface {
-				err = lxcSetConfigItem(c.c, fmt.Sprintf("%s.%d.%s", networkKeyPrefix, nicID, nicItem.Key), nicItem.Value)
+				err = lxcSetConfigItem(d.c, fmt.Sprintf("%s.%d.%s", networkKeyPrefix, nicID, nicItem.Key), nicItem.Value)
 				if err != nil {
 					return "", nil, errors.Wrapf(err, "Failed to setup device network interface '%s'", dev.Name)
 				}
@@ -2240,15 +2240,15 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 	}
 
 	// Generate the LXC config
-	configPath := filepath.Join(c.LogPath(), "lxc.conf")
-	err = c.c.SaveConfigFile(configPath)
+	configPath := filepath.Join(d.LogPath(), "lxc.conf")
+	err = d.c.SaveConfigFile(configPath)
 	if err != nil {
 		os.Remove(configPath)
 		return "", nil, err
 	}
 
 	// Set ownership to match container root
-	currentIdmapset, err := c.CurrentIdmap()
+	currentIdmapset, err := d.CurrentIdmap()
 	if err != nil {
 		return "", nil, err
 	}
@@ -2258,30 +2258,30 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 		uid, _ = currentIdmapset.ShiftFromNs(0, 0)
 	}
 
-	err = os.Chown(c.Path(), int(uid), 0)
+	err = os.Chown(d.Path(), int(uid), 0)
 	if err != nil {
 		return "", nil, err
 	}
 
 	// We only need traversal by root in the container
-	err = os.Chmod(c.Path(), 0100)
+	err = os.Chmod(d.Path(), 0100)
 	if err != nil {
 		return "", nil, err
 	}
 
 	// Update the backup.yaml file
-	err = c.UpdateBackupFile()
+	err = d.UpdateBackupFile()
 	if err != nil {
 		return "", nil, err
 	}
 
 	// If starting stateless, wipe state
-	if !c.IsStateful() && shared.PathExists(c.StatePath()) {
-		os.RemoveAll(c.StatePath())
+	if !d.IsStateful() && shared.PathExists(d.StatePath()) {
+		os.RemoveAll(d.StatePath())
 	}
 
 	// Unmount any previously mounted shiftfs
-	unix.Unmount(c.RootfsPath(), unix.MNT_DETACH)
+	unix.Unmount(d.RootfsPath(), unix.MNT_DETACH)
 
 	revert.Success()
 	return configPath, postStartHooks, nil
@@ -2289,12 +2289,12 @@ func (c *lxc) startCommon() (string, []func() error, error) {
 
 // detachInterfaceRename enters the container's network namespace and moves the named interface
 // in ifName back to the network namespace of the running process as the name specified in hostName.
-func (c *lxc) detachInterfaceRename(netns string, ifName string, hostName string) error {
+func (d *lxc) detachInterfaceRename(netns string, ifName string, hostName string) error {
 	lxdPID := os.Getpid()
 
 	// Run forknet detach
 	_, err := shared.RunCommand(
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forknet",
 		"detach",
 		"--",
@@ -2313,11 +2313,11 @@ func (c *lxc) detachInterfaceRename(netns string, ifName string, hostName string
 }
 
 // Start starts the instance.
-func (c *lxc) Start(stateful bool) error {
+func (d *lxc) Start(stateful bool) error {
 	var ctxMap log.Ctx
 
 	// Setup a new operation
-	op, err := operationlock.Create(c.id, "start", false, false)
+	op, err := operationlock.Create(d.id, "start", false, false)
 	if err != nil {
 		return errors.Wrap(err, "Create container start operation")
 	}
@@ -2328,31 +2328,31 @@ func (c *lxc) Start(stateful bool) error {
 	}
 
 	// Run the shared start code
-	configPath, postStartHooks, err := c.startCommon()
+	configPath, postStartHooks, err := d.startCommon()
 	if err != nil {
 		return errors.Wrap(err, "Failed preparing container for start")
 	}
 
 	ctxMap = log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
+		"project":   d.project,
+		"name":      d.name,
 		"action":    op.Action(),
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate,
 		"stateful":  stateful}
 
 	logger.Info("Starting container", ctxMap)
 
 	// If stateful, restore now
 	if stateful {
-		if !c.stateful {
+		if !d.stateful {
 			return fmt.Errorf("Container has no existing state to restore")
 		}
 
 		criuMigrationArgs := instance.CriuMigrationArgs{
 			Cmd:          liblxc.MIGRATE_RESTORE,
-			StateDir:     c.StatePath(),
+			StateDir:     d.StatePath(),
 			Function:     "snapshot",
 			Stop:         false,
 			ActionScript: false,
@@ -2360,58 +2360,58 @@ func (c *lxc) Start(stateful bool) error {
 			PreDumpDir:   "",
 		}
 
-		err := c.Migrate(&criuMigrationArgs)
-		if err != nil && !c.IsRunning() {
+		err := d.Migrate(&criuMigrationArgs)
+		if err != nil && !d.IsRunning() {
 			return errors.Wrap(err, "Migrate")
 		}
 
-		os.RemoveAll(c.StatePath())
-		c.stateful = false
+		os.RemoveAll(d.StatePath())
+		d.stateful = false
 
-		err = c.state.Cluster.UpdateInstanceStatefulFlag(c.id, false)
+		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, false)
 		if err != nil {
 			logger.Error("Failed starting container", ctxMap)
 			return errors.Wrap(err, "Start container")
 		}
 
 		// Run any post start hooks.
-		err = c.runHooks(postStartHooks)
+		err = d.runHooks(postStartHooks)
 		if err != nil {
 			// Attempt to stop container.
 			op.Done(err)
-			c.Stop(false)
+			d.Stop(false)
 			return err
 		}
 
 		logger.Info("Started container", ctxMap)
 		return nil
-	} else if c.stateful {
+	} else if d.stateful {
 		/* stateless start required when we have state, let's delete it */
-		err := os.RemoveAll(c.StatePath())
+		err := os.RemoveAll(d.StatePath())
 		if err != nil {
 			return err
 		}
 
-		c.stateful = false
-		err = c.state.Cluster.UpdateInstanceStatefulFlag(c.id, false)
+		d.stateful = false
+		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, false)
 		if err != nil {
 			return errors.Wrap(err, "Persist stateful flag")
 		}
 	}
 
-	name := project.Instance(c.Project(), c.name)
+	name := project.Instance(d.Project(), d.name)
 
 	// Start the LXC container
 	_, err = shared.RunCommand(
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forkstart",
 		name,
-		c.state.OS.LxcPath,
+		d.state.OS.LxcPath,
 		configPath)
-	if err != nil && !c.IsRunning() {
+	if err != nil && !d.IsRunning() {
 		// Attempt to extract the LXC errors
 		lxcLog := ""
-		logPath := filepath.Join(c.LogPath(), "lxc.log")
+		logPath := filepath.Join(d.LogPath(), "lxc.log")
 		if shared.PathExists(logPath) {
 			logContent, err := ioutil.ReadFile(logPath)
 			if err == nil {
@@ -2443,94 +2443,94 @@ func (c *lxc) Start(stateful bool) error {
 	}
 
 	// Run any post start hooks.
-	err = c.runHooks(postStartHooks)
+	err = d.runHooks(postStartHooks)
 	if err != nil {
 		// Attempt to stop container.
 		op.Done(err)
-		c.Stop(false)
+		d.Stop(false)
 		return err
 	}
 
 	logger.Info("Started container", ctxMap)
-	c.state.Events.SendLifecycle(c.project, "container-started",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+	d.state.Events.SendLifecycle(d.project, "container-started",
+		fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
 	return nil
 }
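
Start() and Stop() serialize on a per-instance operation lock: operationlock.Create() registers the action, op.Done(err) records a failure so that anything blocked in op.Wait() is released with that error, and the stop hook later picks the same lock back up with operationlock.Get(). A rough skeleton of that calling convention, using only the calls visible in this file (doAction is a placeholder helper, not part of LXD):

    func actionSkeleton(d *lxc, action string) error {
        op, err := operationlock.Create(d.id, action, false, false) // one action per instance at a time
        if err != nil {
            return err
        }

        if err := doAction(d); err != nil { // placeholder for startCommon/forkstart etc.
            op.Done(err) // record the failure; op.Wait() callers receive the error
            return err
        }

        op.Done(nil) // mark the operation as completed
        return nil
    }
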
 
 // OnHook is the top-level hook handler.
-func (c *lxc) OnHook(hookName string, args map[string]string) error {
+func (d *lxc) OnHook(hookName string, args map[string]string) error {
 	switch hookName {
 	case instance.HookStart:
-		return c.onStart(args)
+		return d.onStart(args)
 	case instance.HookStopNS:
-		return c.onStopNS(args)
+		return d.onStopNS(args)
 	case instance.HookStop:
-		return c.onStop(args)
+		return d.onStop(args)
 	default:
 		return instance.ErrNotImplemented
 	}
 }
 
 // onStart implements the start hook.
-func (c *lxc) onStart(_ map[string]string) error {
+func (d *lxc) onStart(_ map[string]string) error {
 	// Make sure we can't call go-lxc functions by mistake
-	c.fromHook = true
+	d.fromHook = true
 
 	// Load the container AppArmor profile
-	err := apparmor.InstanceLoad(c.state, c)
+	err := apparmor.InstanceLoad(d.state, d)
 	if err != nil {
 		return err
 	}
 
 	// Template anything that needs templating
 	key := "volatile.apply_template"
-	if c.localConfig[key] != "" {
+	if d.localConfig[key] != "" {
 		// Run any template that needs running
-		err = c.templateApplyNow(c.localConfig[key])
+		err = d.templateApplyNow(d.localConfig[key])
 		if err != nil {
-			apparmor.InstanceUnload(c.state, c)
+			apparmor.InstanceUnload(d.state, d)
 			return err
 		}
 
 		// Remove the volatile key from the DB
-		err := c.state.Cluster.DeleteInstanceConfigKey(c.id, key)
+		err := d.state.Cluster.DeleteInstanceConfigKey(d.id, key)
 		if err != nil {
-			apparmor.InstanceUnload(c.state, c)
+			apparmor.InstanceUnload(d.state, d)
 			return err
 		}
 	}
 
-	err = c.templateApplyNow("start")
+	err = d.templateApplyNow("start")
 	if err != nil {
-		apparmor.InstanceUnload(c.state, c)
+		apparmor.InstanceUnload(d.state, d)
 		return err
 	}
 
 	// Trigger a rebalance
-	cgroup.TaskSchedulerTrigger("container", c.name, "started")
+	cgroup.TaskSchedulerTrigger("container", d.name, "started")
 
 	// Apply network priority
-	if c.expandedConfig["limits.network.priority"] != "" {
-		go func(c *lxc) {
-			c.fromHook = false
-			err := c.setNetworkPriority()
+	if d.expandedConfig["limits.network.priority"] != "" {
+		go func(d *lxc) {
+			d.fromHook = false
+			err := d.setNetworkPriority()
 			if err != nil {
-				logger.Error("Failed to apply network priority", log.Ctx{"container": c.name, "err": err})
+				logger.Error("Failed to apply network priority", log.Ctx{"container": d.name, "err": err})
 			}
-		}(c)
+		}(d)
 	}
 
 	// Database updates
-	err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		// Record current state
-		err = tx.UpdateInstancePowerState(c.id, "RUNNING")
+		err = tx.UpdateInstancePowerState(d.id, "RUNNING")
 		if err != nil {
 			return errors.Wrap(err, "Error updating container state")
 		}
 
 		// Update the time the container was last started
-		err = tx.UpdateInstanceLastUsedDate(c.id, time.Now().UTC())
+		err = tx.UpdateInstanceLastUsedDate(d.id, time.Now().UTC())
 		if err != nil {
 			return errors.Wrap(err, "Error updating last used")
 		}
@@ -2545,27 +2545,27 @@ func (c *lxc) onStart(_ map[string]string) error {
 }
 
 // Stop functions
-func (c *lxc) Stop(stateful bool) error {
+func (d *lxc) Stop(stateful bool) error {
 	var ctxMap log.Ctx
 
 	// Check that we're not already stopped
-	if !c.IsRunning() {
+	if !d.IsRunning() {
 		return fmt.Errorf("The container is already stopped")
 	}
 
 	// Setup a new operation
-	op, err := operationlock.Create(c.id, "stop", false, true)
+	op, err := operationlock.Create(d.id, "stop", false, true)
 	if err != nil {
 		return err
 	}
 
 	ctxMap = log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
+		"project":   d.project,
+		"name":      d.name,
 		"action":    op.Action(),
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate,
 		"stateful":  stateful}
 
 	logger.Info("Stopping container", ctxMap)
@@ -2573,7 +2573,7 @@ func (c *lxc) Stop(stateful bool) error {
 	// Handle stateful stop
 	if stateful {
 		// Cleanup any existing state
-		stateDir := c.StatePath()
+		stateDir := d.StatePath()
 		os.RemoveAll(stateDir)
 
 		err := os.MkdirAll(stateDir, 0700)
@@ -2594,7 +2594,7 @@ func (c *lxc) Stop(stateful bool) error {
 		}
 
 		// Checkpoint
-		err = c.Migrate(&criuMigrationArgs)
+		err = d.Migrate(&criuMigrationArgs)
 		if err != nil {
 			op.Done(err)
 			logger.Error("Failed stopping container", ctxMap)
@@ -2602,13 +2602,13 @@ func (c *lxc) Stop(stateful bool) error {
 		}
 
 		err = op.Wait()
-		if err != nil && c.IsRunning() {
+		if err != nil && d.IsRunning() {
 			logger.Error("Failed stopping container", ctxMap)
 			return err
 		}
 
-		c.stateful = true
-		err = c.state.Cluster.UpdateInstanceStatefulFlag(c.id, true)
+		d.stateful = true
+		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, true)
 		if err != nil {
 			op.Done(err)
 			logger.Error("Failed stopping container", ctxMap)
@@ -2617,23 +2617,23 @@ func (c *lxc) Stop(stateful bool) error {
 
 		op.Done(nil)
 		logger.Info("Stopped container", ctxMap)
-		c.state.Events.SendLifecycle(c.project, "container-stopped",
-			fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+		d.state.Events.SendLifecycle(d.project, "container-stopped",
+			fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 		return nil
-	} else if shared.PathExists(c.StatePath()) {
-		os.RemoveAll(c.StatePath())
+	} else if shared.PathExists(d.StatePath()) {
+		os.RemoveAll(d.StatePath())
 	}
 
 	// Load the go-lxc struct
-	if c.expandedConfig["raw.lxc"] != "" {
-		err = c.initLXC(true)
+	if d.expandedConfig["raw.lxc"] != "" {
+		err = d.initLXC(true)
 		if err != nil {
 			op.Done(err)
 			logger.Error("Failed stopping container", ctxMap)
 			return err
 		}
 	} else {
-		err = c.initLXC(false)
+		err = d.initLXC(false)
 		if err != nil {
 			op.Done(err)
 			logger.Error("Failed stopping container", ctxMap)
@@ -2642,86 +2642,86 @@ func (c *lxc) Stop(stateful bool) error {
 	}
 
 	// Load cgroup abstraction
-	cg, err := c.cgroup(nil)
+	cg, err := d.cgroup(nil)
 	if err != nil {
 		op.Done(err)
 		return err
 	}
 
 	// Fork-bomb mitigation, prevent forking from this point on
-	if c.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
+	if d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
 		// Attempt to disable forking new processes
 		cg.SetMaxProcesses(0)
-	} else if c.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
+	} else if d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
 		// Attempt to freeze the container
 		freezer := make(chan bool, 1)
 		go func() {
-			c.Freeze()
+			d.Freeze()
 			freezer <- true
 		}()
 
 		select {
 		case <-freezer:
 		case <-time.After(time.Second * 5):
-			c.Unfreeze()
+			d.Unfreeze()
 		}
 	}
 
-	if err := c.c.Stop(); err != nil {
+	if err := d.c.Stop(); err != nil {
 		op.Done(err)
 		logger.Error("Failed stopping container", ctxMap)
 		return err
 	}
 
 	err = op.Wait()
-	if err != nil && c.IsRunning() {
+	if err != nil && d.IsRunning() {
 		logger.Error("Failed stopping container", ctxMap)
 		return err
 	}
 
 	logger.Info("Stopped container", ctxMap)
-	c.state.Events.SendLifecycle(c.project, "container-stopped",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+	d.state.Events.SendLifecycle(d.project, "container-stopped",
+		fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
 	return nil
 }
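
The fork-bomb mitigation above prefers clamping the pids controller to zero new processes and only falls back to freezing the container; the freeze itself is bounded by running it in a goroutine and racing it against a timer, so a wedged LXC monitor cannot block Stop() indefinitely (on timeout the driver simply unfreezes again). The same idiom in isolation, as a generic sketch rather than LXD code:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // freezeWithTimeout runs freeze() asynchronously and gives up after limit,
    // mirroring the five second bound used in Stop() above.
    func freezeWithTimeout(freeze func(), limit time.Duration) error {
        done := make(chan struct{}, 1)
        go func() {
            freeze()
            done <- struct{}{}
        }()

        select {
        case <-done:
            return nil
        case <-time.After(limit):
            return errors.New("freeze timed out")
        }
    }

    func main() {
        err := freezeWithTimeout(func() { time.Sleep(10 * time.Second) }, 5*time.Second)
        fmt.Println(err) // freeze timed out
    }
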
 
 // Shutdown stops the instance.
-func (c *lxc) Shutdown(timeout time.Duration) error {
+func (d *lxc) Shutdown(timeout time.Duration) error {
 	var ctxMap log.Ctx
 
 	// Check that we're not already stopped
-	if !c.IsRunning() {
+	if !d.IsRunning() {
 		return fmt.Errorf("The container is already stopped")
 	}
 
 	// Setup a new operation
-	op, err := operationlock.Create(c.id, "stop", true, true)
+	op, err := operationlock.Create(d.id, "stop", true, true)
 	if err != nil {
 		return err
 	}
 
 	ctxMap = log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
+		"project":   d.project,
+		"name":      d.name,
 		"action":    "shutdown",
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate,
 		"timeout":   timeout}
 
 	logger.Info("Shutting down container", ctxMap)
 
 	// Load the go-lxc struct
-	if c.expandedConfig["raw.lxc"] != "" {
-		err = c.initLXC(true)
+	if d.expandedConfig["raw.lxc"] != "" {
+		err = d.initLXC(true)
 		if err != nil {
 			op.Done(err)
 			logger.Error("Failed stopping container", ctxMap)
 			return err
 		}
 	} else {
-		err = c.initLXC(false)
+		err = d.initLXC(false)
 		if err != nil {
 			op.Done(err)
 			logger.Error("Failed stopping container", ctxMap)
@@ -2729,76 +2729,76 @@ func (c *lxc) Shutdown(timeout time.Duration) error {
 		}
 	}
 
-	if err := c.c.Shutdown(timeout); err != nil {
+	if err := d.c.Shutdown(timeout); err != nil {
 		op.Done(err)
 		logger.Error("Failed shutting down container", ctxMap)
 		return err
 	}
 
 	err = op.Wait()
-	if err != nil && c.IsRunning() {
+	if err != nil && d.IsRunning() {
 		logger.Error("Failed shutting down container", ctxMap)
 		return err
 	}
 
 	logger.Info("Shut down container", ctxMap)
-	c.state.Events.SendLifecycle(c.project, "container-shutdown",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+	d.state.Events.SendLifecycle(d.project, "container-shutdown",
+		fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
 	return nil
 }
 
 // Restart restarts the instance.
-func (c *lxc) Restart(timeout time.Duration) error {
-	return c.common.restart(c, timeout)
+func (d *lxc) Restart(timeout time.Duration) error {
+	return d.common.restart(d, timeout)
 }
 
 // onStopNS is triggered by LXC's stop hook once a container is shutdown but before the container's
 // namespaces have been closed. The netns path of the stopped container is provided.
-func (c *lxc) onStopNS(args map[string]string) error {
+func (d *lxc) onStopNS(args map[string]string) error {
 	target := args["target"]
 	netns := args["netns"]
 
 	// Validate target.
 	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
-		logger.Error("Container sent invalid target to OnStopNS", log.Ctx{"container": c.Name(), "target": target})
+		logger.Error("Container sent invalid target to OnStopNS", log.Ctx{"container": d.Name(), "target": target})
 		return fmt.Errorf("Invalid stop target %q", target)
 	}
 
 	// Clean up devices.
-	c.cleanupDevices(false, netns)
+	d.cleanupDevices(false, netns)
 
 	return nil
 }
 
 // onStop is triggered by LXC's post-stop hook once a container is shutdown and after the
 // container's namespaces have been closed.
-func (c *lxc) onStop(args map[string]string) error {
+func (d *lxc) onStop(args map[string]string) error {
 	target := args["target"]
 
 	// Validate target
 	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
-		logger.Error("Container sent invalid target to OnStop", log.Ctx{"container": c.Name(), "target": target})
+		logger.Error("Container sent invalid target to OnStop", log.Ctx{"container": d.Name(), "target": target})
 		return fmt.Errorf("Invalid stop target: %s", target)
 	}
 
 	// Pick up the existing stop operation lock created in Stop() function.
-	op := operationlock.Get(c.id)
+	op := operationlock.Get(d.id)
 	if op != nil && op.Action() != "stop" {
 		return fmt.Errorf("Container is already running a %s operation", op.Action())
 	}
 
 	// Make sure we can't call go-lxc functions by mistake
-	c.fromHook = true
+	d.fromHook = true
 
 	// Log user actions
 	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
+		"project":   d.project,
+		"name":      d.name,
 		"action":    target,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate,
 		"stateful":  false}
 
 	if op == nil {
@@ -2806,13 +2806,13 @@ func (c *lxc) onStop(args map[string]string) error {
 	}
 
 	// Record power state
-	err := c.state.Cluster.UpdateInstancePowerState(c.id, "STOPPED")
+	err := d.state.Cluster.UpdateInstancePowerState(d.id, "STOPPED")
 	if err != nil {
-		logger.Error("Failed to set container state", log.Ctx{"container": c.Name(), "err": err})
+		logger.Error("Failed to set container state", log.Ctx{"container": d.Name(), "err": err})
 	}
 
-	go func(c *lxc, target string, op *operationlock.InstanceOperation) {
-		c.fromHook = false
+	go func(d *lxc, target string, op *operationlock.InstanceOperation) {
+		d.fromHook = false
 		err = nil
 
 		// Unlock on return
@@ -2821,105 +2821,105 @@ func (c *lxc) onStop(args map[string]string) error {
 		}
 
 		// Wait for other post-stop actions to finish and for the container to actually stop.
-		c.IsRunning()
-		logger.Debug("Container stopped, starting storage cleanup", log.Ctx{"container": c.Name()})
+		d.IsRunning()
+		logger.Debug("Container stopped, starting storage cleanup", log.Ctx{"container": d.Name()})
 
 		// Clean up devices.
-		c.cleanupDevices(false, "")
+		d.cleanupDevices(false, "")
 
 		// Remove directory ownership (to avoid issue if uidmap is re-used)
-		err := os.Chown(c.Path(), 0, 0)
+		err := os.Chown(d.Path(), 0, 0)
 		if err != nil {
 			if op != nil {
 				op.Done(err)
 			}
 
-			logger.Error("Failed clearing ownership", log.Ctx{"container": c.Name(), "err": err, "path": c.Path()})
+			logger.Error("Failed clearing ownership", log.Ctx{"container": d.Name(), "err": err, "path": d.Path()})
 		}
 
-		err = os.Chmod(c.Path(), 0100)
+		err = os.Chmod(d.Path(), 0100)
 		if err != nil {
 			if op != nil {
 				op.Done(err)
 			}
 
-			logger.Error("Failed clearing permissions", log.Ctx{"container": c.Name(), "err": err, "path": c.Path()})
+			logger.Error("Failed clearing permissions", log.Ctx{"container": d.Name(), "err": err, "path": d.Path()})
 		}
 
 		// Stop the storage for this container
-		_, err = c.unmount()
+		_, err = d.unmount()
 		if err != nil {
 			if op != nil {
 				op.Done(err)
 			}
 
-			logger.Error("Failed unmounting container", log.Ctx{"container": c.Name(), "err": err})
+			logger.Error("Failed unmounting container", log.Ctx{"container": d.Name(), "err": err})
 		}
 
 		// Unload the apparmor profile
-		err = apparmor.InstanceUnload(c.state, c)
+		err = apparmor.InstanceUnload(d.state, d)
 		if err != nil {
-			logger.Error("Failed to destroy apparmor namespace", log.Ctx{"container": c.Name(), "err": err})
+			logger.Error("Failed to destroy apparmor namespace", log.Ctx{"container": d.Name(), "err": err})
 		}
 
 		// Clean all the unix devices
-		err = c.removeUnixDevices()
+		err = d.removeUnixDevices()
 		if err != nil {
 			if op != nil {
 				op.Done(err)
 			}
 
-			logger.Error("Unable to remove unix devices", log.Ctx{"container": c.Name(), "err": err})
+			logger.Error("Unable to remove unix devices", log.Ctx{"container": d.Name(), "err": err})
 		}
 
 		// Clean all the disk devices
-		err = c.removeDiskDevices()
+		err = d.removeDiskDevices()
 		if err != nil {
 			if op != nil {
 				op.Done(err)
 			}
 
-			logger.Error("Unable to remove disk devices", log.Ctx{"container": c.Name(), "err": err})
+			logger.Error("Unable to remove disk devices", log.Ctx{"container": d.Name(), "err": err})
 		}
 
 		// Log and emit lifecycle if not user triggered
 		if op == nil {
 			logger.Info("Shut down container", ctxMap)
-			c.state.Events.SendLifecycle(c.project, "container-shutdown", fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+			d.state.Events.SendLifecycle(d.project, "container-shutdown", fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 		}
 
 		// Reboot the container
 		if target == "reboot" {
 			// Start the container again
-			err = c.Start(false)
+			err = d.Start(false)
 			if err != nil {
 				if op != nil {
 					op.Done(err)
 				}
 
-				logger.Error("Failed restarting container", log.Ctx{"container": c.Name(), "err": err})
+				logger.Error("Failed restarting container", log.Ctx{"container": d.Name(), "err": err})
 				return
 			}
 
-			c.state.Events.SendLifecycle(c.project, "container-restarted", fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+			d.state.Events.SendLifecycle(d.project, "container-restarted", fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 			return
 		}
 
 		// Trigger a rebalance
-		cgroup.TaskSchedulerTrigger("container", c.name, "stopped")
+		cgroup.TaskSchedulerTrigger("container", d.name, "stopped")
 
 		// Destroy ephemeral containers
-		if c.ephemeral {
-			err = c.Delete()
+		if d.ephemeral {
+			err = d.Delete()
 			if err != nil {
 				if op != nil {
 					op.Done(err)
 				}
 
-				logger.Error("Failed deleting ephemeral", log.Ctx{"container": c.Name(), "err": err})
+				logger.Error("Failed deleting ephemeral", log.Ctx{"container": d.Name(), "err": err})
 			}
 		}
-	}(c, target, op)
+	}(d, target, op)
 
 	return nil
 }
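
onStop hands d, target and op to the cleanup goroutine as explicit parameters rather than relying on closure capture: the arguments of a go statement are evaluated immediately in the calling goroutine, so the asynchronous cleanup keeps operating on the values that were current when the hook fired, even though onStop itself returns right away. A minimal illustration of that property (generic Go, nothing LXD-specific):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        target := "stop"

        wg.Add(1)
        go func(target string) { // argument evaluated now, at the go statement
            defer wg.Done()
            fmt.Println("cleanup for", target) // always prints "cleanup for stop"
        }(target)

        target = "reboot" // later reassignment does not affect the goroutine's copy
        wg.Wait()
    }
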
@@ -2927,8 +2927,8 @@ func (c *lxc) onStop(args map[string]string) error {
 // cleanupDevices performs any needed device cleanup steps when container is stopped.
 // Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
 // container's network namespace is unmounted (which is required for NIC device cleanup).
-func (c *lxc) cleanupDevices(instanceRunning bool, stopHookNetnsPath string) {
-	for _, dev := range c.expandedDevices.Reversed() {
+func (d *lxc) cleanupDevices(instanceRunning bool, stopHookNetnsPath string) {
+	for _, dev := range d.expandedDevices.Reversed() {
 		// Only stop NIC devices when run from the onStopNS hook, and stop all other devices when run from
 		// the onStop hook. This way disk devices are stopped after the instance has been fully stopped.
 		if (stopHookNetnsPath != "" && dev.Config["type"] != "nic") || (stopHookNetnsPath == "" && dev.Config["type"] == "nic") {
@@ -2936,7 +2936,7 @@ func (c *lxc) cleanupDevices(instanceRunning bool, stopHookNetnsPath string) {
 		}
 
 		// Use the device interface if device supports it.
-		err := c.deviceStop(dev.Name, dev.Config, instanceRunning, stopHookNetnsPath)
+		err := d.deviceStop(dev.Name, dev.Config, instanceRunning, stopHookNetnsPath)
 		if err == device.ErrUnsupportedDevType {
 			continue
 		} else if err != nil {
@@ -2946,46 +2946,46 @@ func (c *lxc) cleanupDevices(instanceRunning bool, stopHookNetnsPath string) {
 }
 
 // Freeze functions.
-func (c *lxc) Freeze() error {
+func (d *lxc) Freeze() error {
 	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate}
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate}
 
 	// Check that we're running
-	if !c.IsRunning() {
+	if !d.IsRunning() {
 		return fmt.Errorf("The container isn't running")
 	}
 
-	cg, err := c.cgroup(nil)
+	cg, err := d.cgroup(nil)
 	if err != nil {
 		return err
 	}
 
 	// Check if the CGroup is available
-	if !c.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
+	if !d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
 		logger.Info("Unable to freeze container (lack of kernel support)", ctxMap)
 		return nil
 	}
 
 	// Check that we're not already frozen
-	if c.IsFrozen() {
+	if d.IsFrozen() {
 		return fmt.Errorf("The container is already frozen")
 	}
 
 	logger.Info("Freezing container", ctxMap)
 
 	// Load the go-lxc struct
-	err = c.initLXC(false)
+	err = d.initLXC(false)
 	if err != nil {
 		ctxMap["err"] = err
 		logger.Error("Failed freezing container", ctxMap)
 		return err
 	}
 
-	err = c.c.Freeze()
+	err = d.c.Freeze()
 	if err != nil {
 		ctxMap["err"] = err
 		logger.Error("Failed freezing container", ctxMap)
@@ -2993,77 +2993,77 @@ func (c *lxc) Freeze() error {
 	}
 
 	logger.Info("Froze container", ctxMap)
-	c.state.Events.SendLifecycle(c.project, "container-paused",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+	d.state.Events.SendLifecycle(d.project, "container-paused",
+		fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
 	return err
 }
 
 // Unfreeze unfreezes the instance.
-func (c *lxc) Unfreeze() error {
+func (d *lxc) Unfreeze() error {
 	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate}
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate}
 
 	// Check that we're running
-	if !c.IsRunning() {
+	if !d.IsRunning() {
 		return fmt.Errorf("The container isn't running")
 	}
 
-	cg, err := c.cgroup(nil)
+	cg, err := d.cgroup(nil)
 	if err != nil {
 		return err
 	}
 
 	// Check if the CGroup is available
-	if !c.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
+	if !d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
 		logger.Info("Unable to unfreeze container (lack of kernel support)", ctxMap)
 		return nil
 	}
 
 	// Check that we're frozen
-	if !c.IsFrozen() {
+	if !d.IsFrozen() {
 		return fmt.Errorf("The container is already running")
 	}
 
 	logger.Info("Unfreezing container", ctxMap)
 
 	// Load the go-lxc struct
-	err = c.initLXC(false)
+	err = d.initLXC(false)
 	if err != nil {
 		logger.Error("Failed unfreezing container", ctxMap)
 		return err
 	}
 
-	err = c.c.Unfreeze()
+	err = d.c.Unfreeze()
 	if err != nil {
 		logger.Error("Failed unfreezing container", ctxMap)
 	}
 
 	logger.Info("Unfroze container", ctxMap)
-	c.state.Events.SendLifecycle(c.project, "container-resumed",
-		fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+	d.state.Events.SendLifecycle(d.project, "container-resumed",
+		fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
 	return err
 }
 
 // Get lxc container state, with 1 second timeout
 // If we don't get a reply, assume the lxc monitor is hung
-func (c *lxc) getLxcState() (liblxc.State, error) {
-	if c.IsSnapshot() {
+func (d *lxc) getLxcState() (liblxc.State, error) {
+	if d.IsSnapshot() {
 		return liblxc.StateMap["STOPPED"], nil
 	}
 
 	// Load the go-lxc struct
-	err := c.initLXC(false)
+	err := d.initLXC(false)
 	if err != nil {
 		return liblxc.StateMap["STOPPED"], err
 	}
 
-	if c.c == nil {
+	if d.c == nil {
 		return liblxc.StateMap["STOPPED"], nil
 	}
 
@@ -3071,7 +3071,7 @@ func (c *lxc) getLxcState() (liblxc.State, error) {
 
 	go func(c *liblxc.Container) {
 		monitor <- c.State()
-	}(c.c)
+	}(d.c)
 
 	select {
 	case state := <-monitor:
@@ -3082,29 +3082,29 @@ func (c *lxc) getLxcState() (liblxc.State, error) {
 }
 
 // Render renders the state of the instance.
-func (c *lxc) Render(options ...func(response interface{}) error) (interface{}, interface{}, error) {
+func (d *lxc) Render(options ...func(response interface{}) error) (interface{}, interface{}, error) {
 	// Ignore err as the arch string on error is correct (unknown)
-	architectureName, _ := osarch.ArchitectureName(c.architecture)
+	architectureName, _ := osarch.ArchitectureName(d.architecture)
 
-	if c.IsSnapshot() {
+	if d.IsSnapshot() {
 		// Prepare the ETag
-		etag := []interface{}{c.expiryDate}
+		etag := []interface{}{d.expiryDate}
 
 		snapState := api.InstanceSnapshot{
-			CreatedAt:       c.creationDate,
-			ExpandedConfig:  c.expandedConfig,
-			ExpandedDevices: c.expandedDevices.CloneNative(),
-			LastUsedAt:      c.lastUsedDate,
-			Name:            strings.SplitN(c.name, "/", 2)[1],
-			Stateful:        c.stateful,
+			CreatedAt:       d.creationDate,
+			ExpandedConfig:  d.expandedConfig,
+			ExpandedDevices: d.expandedDevices.CloneNative(),
+			LastUsedAt:      d.lastUsedDate,
+			Name:            strings.SplitN(d.name, "/", 2)[1],
+			Stateful:        d.stateful,
 			Size:            -1, // Default to uninitialised/error state (0 means no CoW usage).
 		}
 		snapState.Architecture = architectureName
-		snapState.Config = c.localConfig
-		snapState.Devices = c.localDevices.CloneNative()
-		snapState.Ephemeral = c.ephemeral
-		snapState.Profiles = c.profiles
-		snapState.ExpiresAt = c.expiryDate
+		snapState.Config = d.localConfig
+		snapState.Devices = d.localDevices.CloneNative()
+		snapState.Ephemeral = d.ephemeral
+		snapState.Profiles = d.profiles
+		snapState.ExpiresAt = d.expiryDate
 
 		for _, option := range options {
 			err := option(&snapState)
@@ -3117,34 +3117,34 @@ func (c *lxc) Render(options ...func(response interface{}) error) (interface{},
 	}
 
 	// Prepare the ETag
-	etag := []interface{}{c.architecture, c.localConfig, c.localDevices, c.ephemeral, c.profiles}
+	etag := []interface{}{d.architecture, d.localConfig, d.localDevices, d.ephemeral, d.profiles}
 
 	// FIXME: Render shouldn't directly access the go-lxc struct
-	cState, err := c.getLxcState()
+	cState, err := d.getLxcState()
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "Get container state")
 	}
 	statusCode := lxcStatusCode(cState)
 
 	instState := api.Instance{
-		ExpandedConfig:  c.expandedConfig,
-		ExpandedDevices: c.expandedDevices.CloneNative(),
-		Name:            c.name,
+		ExpandedConfig:  d.expandedConfig,
+		ExpandedDevices: d.expandedDevices.CloneNative(),
+		Name:            d.name,
 		Status:          statusCode.String(),
 		StatusCode:      statusCode,
-		Location:        c.node,
-		Type:            c.Type().String(),
+		Location:        d.node,
+		Type:            d.Type().String(),
 	}
 
-	instState.Description = c.description
+	instState.Description = d.description
 	instState.Architecture = architectureName
-	instState.Config = c.localConfig
-	instState.CreatedAt = c.creationDate
-	instState.Devices = c.localDevices.CloneNative()
-	instState.Ephemeral = c.ephemeral
-	instState.LastUsedAt = c.lastUsedDate
-	instState.Profiles = c.profiles
-	instState.Stateful = c.stateful
+	instState.Config = d.localConfig
+	instState.CreatedAt = d.creationDate
+	instState.Devices = d.localDevices.CloneNative()
+	instState.Ephemeral = d.ephemeral
+	instState.LastUsedAt = d.lastUsedDate
+	instState.Profiles = d.profiles
+	instState.Stateful = d.stateful
 
 	for _, option := range options {
 		err := option(&instState)
@@ -3157,13 +3157,13 @@ func (c *lxc) Render(options ...func(response interface{}) error) (interface{},
 }
 
 // RenderFull renders the full state of the instance.
-func (c *lxc) RenderFull() (*api.InstanceFull, interface{}, error) {
-	if c.IsSnapshot() {
+func (d *lxc) RenderFull() (*api.InstanceFull, interface{}, error) {
+	if d.IsSnapshot() {
 		return nil, nil, fmt.Errorf("RenderFull only works with containers")
 	}
 
 	// Get the Container struct
-	base, etag, err := c.Render()
+	base, etag, err := d.Render()
 	if err != nil {
 		return nil, nil, err
 	}
@@ -3172,13 +3172,13 @@ func (c *lxc) RenderFull() (*api.InstanceFull, interface{}, error) {
 	ct := api.InstanceFull{Instance: *base.(*api.Instance)}
 
 	// Add the ContainerState
-	ct.State, err = c.RenderState()
+	ct.State, err = d.RenderState()
 	if err != nil {
 		return nil, nil, err
 	}
 
 	// Add the ContainerSnapshots
-	snaps, err := c.Snapshots()
+	snaps, err := d.Snapshots()
 	if err != nil {
 		return nil, nil, err
 	}
@@ -3197,7 +3197,7 @@ func (c *lxc) RenderFull() (*api.InstanceFull, interface{}, error) {
 	}
 
 	// Add the ContainerBackups
-	backups, err := c.Backups()
+	backups, err := d.Backups()
 	if err != nil {
 		return nil, nil, err
 	}
@@ -3216,8 +3216,8 @@ func (c *lxc) RenderFull() (*api.InstanceFull, interface{}, error) {
 }
 
 // RenderState renders just the running state of the instance.
-func (c *lxc) RenderState() (*api.InstanceState, error) {
-	cState, err := c.getLxcState()
+func (d *lxc) RenderState() (*api.InstanceState, error) {
+	cState, err := d.getLxcState()
 	if err != nil {
 		return nil, err
 	}
@@ -3227,31 +3227,31 @@ func (c *lxc) RenderState() (*api.InstanceState, error) {
 		StatusCode: statusCode,
 	}
 
-	if c.IsRunning() {
-		pid := c.InitPID()
-		status.CPU = c.cpuState()
-		status.Memory = c.memoryState()
-		status.Network = c.networkState()
+	if d.IsRunning() {
+		pid := d.InitPID()
+		status.CPU = d.cpuState()
+		status.Memory = d.memoryState()
+		status.Network = d.networkState()
 		status.Pid = int64(pid)
-		status.Processes = c.processesState()
+		status.Processes = d.processesState()
 	}
-	status.Disk = c.diskState()
+	status.Disk = d.diskState()
 
 	return &status, nil
 }
 
 // Snapshots returns the snapshots of the instance.
-func (c *lxc) Snapshots() ([]instance.Instance, error) {
+func (d *lxc) Snapshots() ([]instance.Instance, error) {
 	var snaps []db.Instance
 
-	if c.IsSnapshot() {
+	if d.IsSnapshot() {
 		return []instance.Instance{}, nil
 	}
 
 	// Get all the snapshots
-	err := c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+	err := d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		var err error
-		snaps, err = tx.GetInstanceSnapshotsWithName(c.Project(), c.name)
+		snaps, err = tx.GetInstanceSnapshotsWithName(d.Project(), d.name)
 		if err != nil {
 			return err
 		}
@@ -3263,7 +3263,7 @@ func (c *lxc) Snapshots() ([]instance.Instance, error) {
 	}
 
 	// Build the snapshot list
-	containers, err := instance.LoadAllInternal(c.state, snaps)
+	containers, err := instance.LoadAllInternal(d.state, snaps)
 	if err != nil {
 		return nil, err
 	}
@@ -3277,9 +3277,9 @@ func (c *lxc) Snapshots() ([]instance.Instance, error) {
 }
 
 // Backups returns the backups of the instance.
-func (c *lxc) Backups() ([]backup.InstanceBackup, error) {
+func (d *lxc) Backups() ([]backup.InstanceBackup, error) {
 	// Get all the backups
-	backupNames, err := c.state.Cluster.GetInstanceBackups(c.project, c.name)
+	backupNames, err := d.state.Cluster.GetInstanceBackups(d.project, d.name)
 	if err != nil {
 		return nil, err
 	}
@@ -3287,7 +3287,7 @@ func (c *lxc) Backups() ([]backup.InstanceBackup, error) {
 	// Build the backup list
 	backups := []backup.InstanceBackup{}
 	for _, backupName := range backupNames {
-		backup, err := instance.BackupLoadByName(c.state, c.project, backupName)
+		backup, err := instance.BackupLoadByName(d.state, d.project, backupName)
 		if err != nil {
 			return nil, err
 		}
@@ -3299,30 +3299,30 @@ func (c *lxc) Backups() ([]backup.InstanceBackup, error) {
 }
 
 // Restore restores a snapshot.
-func (c *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
+func (d *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 	var ctxMap log.Ctx
 
 	// Stop the container.
 	wasRunning := false
-	if c.IsRunning() {
+	if d.IsRunning() {
 		wasRunning = true
 
-		ephemeral := c.IsEphemeral()
+		ephemeral := d.IsEphemeral()
 		if ephemeral {
 			// Unset ephemeral flag.
 			args := db.InstanceArgs{
-				Architecture: c.Architecture(),
-				Config:       c.LocalConfig(),
-				Description:  c.Description(),
-				Devices:      c.LocalDevices(),
+				Architecture: d.Architecture(),
+				Config:       d.LocalConfig(),
+				Description:  d.Description(),
+				Devices:      d.LocalDevices(),
 				Ephemeral:    false,
-				Profiles:     c.Profiles(),
-				Project:      c.Project(),
-				Type:         c.Type(),
-				Snapshot:     c.IsSnapshot(),
+				Profiles:     d.Profiles(),
+				Project:      d.Project(),
+				Type:         d.Type(),
+				Snapshot:     d.IsSnapshot(),
 			}
 
-			err := c.Update(args, false)
+			err := d.Update(args, false)
 			if err != nil {
 				return err
 			}
@@ -3330,43 +3330,43 @@ func (c *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 			// On function return, set the flag back on.
 			defer func() {
 				args.Ephemeral = ephemeral
-				c.Update(args, false)
+				d.Update(args, false)
 			}()
 		}
 
 		// This will unmount the container storage.
-		err := c.Stop(false)
+		err := d.Stop(false)
 		if err != nil {
 			return err
 		}
 	}
 
 	ctxMap = log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate,
 		"source":    sourceContainer.Name()}
 
 	logger.Info("Restoring container", ctxMap)
 
 	// Initialize storage interface for the container and mount the rootfs for criu state check.
-	pool, err := storagePools.GetPoolByInstance(c.state, c)
+	pool, err := storagePools.GetPoolByInstance(d.state, d)
 	if err != nil {
 		return err
 	}
 
 	// Ensure that storage is mounted for state path checks and for backup.yaml updates.
-	_, err = pool.MountInstance(c, nil)
+	_, err = pool.MountInstance(d, nil)
 	if err != nil {
 		return err
 	}
-	defer pool.UnmountInstance(c, nil)
+	defer pool.UnmountInstance(d, nil)
 
 	// Check for CRIU if necessary, before doing a bunch of filesystem manipulations.
 	// Requires container be mounted to check StatePath exists.
-	if shared.PathExists(c.StatePath()) {
+	if shared.PathExists(d.StatePath()) {
 		_, err := exec.LookPath("criu")
 		if err != nil {
 			return fmt.Errorf("Failed to restore container state. CRIU isn't installed")
@@ -3374,7 +3374,7 @@ func (c *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 	}
 
 	// Restore the rootfs.
-	err = pool.RestoreInstanceSnapshot(c, sourceContainer, nil)
+	err = pool.RestoreInstanceSnapshot(d, sourceContainer, nil)
 	if err != nil {
 		return err
 	}
@@ -3393,7 +3393,7 @@ func (c *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 	}
 
 	// Don't pass as user-requested as there's no way to fix a bad config.
-	err = c.Update(args, false)
+	err = d.Update(args, false)
 	if err != nil {
 		logger.Error("Failed restoring container configuration", ctxMap)
 		return err
@@ -3401,23 +3401,23 @@ func (c *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 
 	// The old backup file may be out of date (e.g. it doesn't have all the current snapshots of
 	// the container listed); let's write a new one to be safe.
-	err = c.UpdateBackupFile()
+	err = d.UpdateBackupFile()
 	if err != nil {
 		return err
 	}
 
 	// If the container wasn't running but was stateful, should we restore it as running?
 	if stateful == true {
-		if !shared.PathExists(c.StatePath()) {
+		if !shared.PathExists(d.StatePath()) {
 			return fmt.Errorf("Stateful snapshot restore requested but snapshot is stateless")
 		}
 
 		logger.Debug("Performing stateful restore", ctxMap)
-		c.stateful = true
+		d.stateful = true
 
 		criuMigrationArgs := instance.CriuMigrationArgs{
 			Cmd:          liblxc.MIGRATE_RESTORE,
-			StateDir:     c.StatePath(),
+			StateDir:     d.StatePath(),
 			Function:     "snapshot",
 			Stop:         false,
 			ActionScript: false,
@@ -3426,15 +3426,15 @@ func (c *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 		}
 
 		// Checkpoint.
-		err := c.Migrate(&criuMigrationArgs)
+		err := d.Migrate(&criuMigrationArgs)
 		if err != nil {
 			return err
 		}
 
 		// Remove the state from the parent container; we only keep this in snapshots.
-		err2 := os.RemoveAll(c.StatePath())
+		err2 := os.RemoveAll(d.StatePath())
 		if err2 != nil {
-			logger.Error("Failed to delete snapshot state", log.Ctx{"path": c.StatePath(), "err": err2})
+			logger.Error("Failed to delete snapshot state", log.Ctx{"path": d.StatePath(), "err": err2})
 		}
 
 		if err != nil {
@@ -3447,55 +3447,55 @@ func (c *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 		return nil
 	}
 
-	c.state.Events.SendLifecycle(c.project, "container-snapshot-restored",
-		fmt.Sprintf("/1.0/containers/%s", c.name), map[string]interface{}{
-			"snapshot_name": c.name,
+	d.state.Events.SendLifecycle(d.project, "container-snapshot-restored",
+		fmt.Sprintf("/1.0/containers/%s", d.name), map[string]interface{}{
+			"snapshot_name": d.name,
 		})
 
 	// Restart the container.
 	if wasRunning {
 		logger.Info("Restored container", ctxMap)
-		return c.Start(false)
+		return d.Start(false)
 	}
 
 	logger.Info("Restored container", ctxMap)
 	return nil
 }
 
-func (c *lxc) cleanup() {
+func (d *lxc) cleanup() {
 	// Unmount any leftovers
-	c.removeUnixDevices()
-	c.removeDiskDevices()
+	d.removeUnixDevices()
+	d.removeDiskDevices()
 
 	// Remove the security profiles
-	apparmor.InstanceDelete(c.state, c)
-	seccomp.DeleteProfile(c)
+	apparmor.InstanceDelete(d.state, d)
+	seccomp.DeleteProfile(d)
 
 	// Remove the devices path
-	os.Remove(c.DevicesPath())
+	os.Remove(d.DevicesPath())
 
 	// Remove the shmounts path
-	os.RemoveAll(c.ShmountsPath())
+	os.RemoveAll(d.ShmountsPath())
 }
 
 // Delete deletes the instance.
-func (c *lxc) Delete() error {
+func (d *lxc) Delete() error {
 	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate}
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate}
 
 	logger.Info("Deleting container", ctxMap)
 
-	if shared.IsTrue(c.expandedConfig["security.protection.delete"]) && !c.IsSnapshot() {
+	if shared.IsTrue(d.expandedConfig["security.protection.delete"]) && !d.IsSnapshot() {
 		err := fmt.Errorf("Container is protected")
-		logger.Warn("Failed to delete container", log.Ctx{"name": c.Name(), "err": err})
+		logger.Warn("Failed to delete container", log.Ctx{"name": d.Name(), "err": err})
 		return err
 	}
 
-	pool, err := storagePools.GetPoolByInstance(c.state, c)
+	pool, err := storagePools.GetPoolByInstance(d.state, d)
 	if err != nil && err != db.ErrNoSuchObject {
 		return err
 	} else if pool != nil {
@@ -3505,16 +3505,16 @@ func (c *lxc) Delete() error {
 		// the creation of the instance and we are now being asked to delete the instance,
 		// we should not remove the storage volumes themselves as this would cause data loss.
 		isImport := false
-		cName, _, _ := shared.InstanceGetParentAndSnapshotName(c.Name())
-		importingFilePath := storagePools.InstanceImportingFilePath(c.Type(), pool.Name(), c.Project(), cName)
+		cName, _, _ := shared.InstanceGetParentAndSnapshotName(d.Name())
+		importingFilePath := storagePools.InstanceImportingFilePath(d.Type(), pool.Name(), d.Project(), cName)
 		if shared.PathExists(importingFilePath) {
 			isImport = true
 		}
 
-		if c.IsSnapshot() {
+		if d.IsSnapshot() {
 			if !isImport {
 				// Remove snapshot volume and database record.
-				err = pool.DeleteInstanceSnapshot(c, nil)
+				err = pool.DeleteInstanceSnapshot(d, nil)
 				if err != nil {
 					return err
 				}
@@ -3522,15 +3522,15 @@ func (c *lxc) Delete() error {
 		} else {
 			// Remove all snapshots by initialising each snapshot as an Instance and
 			// calling its Delete function.
-			err := instance.DeleteSnapshots(c.state, c.Project(), c.Name())
+			err := instance.DeleteSnapshots(d.state, d.Project(), d.Name())
 			if err != nil {
-				logger.Error("Failed to delete instance snapshots", log.Ctx{"project": c.Project(), "instance": c.Name(), "err": err})
+				logger.Error("Failed to delete instance snapshots", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 				return err
 			}
 
 			if !isImport {
 				// Remove the storage volume, snapshot volumes and database records.
-				err = pool.DeleteInstance(c, nil)
+				err = pool.DeleteInstance(d, nil)
 				if err != nil {
 					return err
 				}
@@ -3539,9 +3539,9 @@ func (c *lxc) Delete() error {
 	}
 
 	// Perform other cleanup steps if not snapshot.
-	if !c.IsSnapshot() {
+	if !d.IsSnapshot() {
 		// Remove all backups.
-		backups, err := c.Backups()
+		backups, err := d.Backups()
 		if err != nil {
 			return err
 		}
@@ -3554,92 +3554,92 @@ func (c *lxc) Delete() error {
 		}
 
 		// Delete the MAAS entry.
-		err = c.maasDelete()
+		err = d.maasDelete()
 		if err != nil {
-			logger.Error("Failed deleting container MAAS record", log.Ctx{"name": c.Name(), "err": err})
+			logger.Error("Failed deleting container MAAS record", log.Ctx{"name": d.Name(), "err": err})
 			return err
 		}
 
 		// Remove devices from container.
-		for k, m := range c.expandedDevices {
-			err = c.deviceRemove(k, m, false)
+		for k, m := range d.expandedDevices {
+			err = d.deviceRemove(k, m, false)
 			if err != nil && err != device.ErrUnsupportedDevType {
 				return errors.Wrapf(err, "Failed to remove device %q", k)
 			}
 		}
 
 		// Clean things up.
-		c.cleanup()
+		d.cleanup()
 	}
 
 	// Remove the database record of the instance or snapshot instance.
-	if err := c.state.Cluster.DeleteInstance(c.project, c.Name()); err != nil {
-		logger.Error("Failed deleting container entry", log.Ctx{"name": c.Name(), "err": err})
+	if err := d.state.Cluster.DeleteInstance(d.project, d.Name()); err != nil {
+		logger.Error("Failed deleting container entry", log.Ctx{"name": d.Name(), "err": err})
 		return err
 	}
 
 	logger.Info("Deleted container", ctxMap)
 
-	if c.IsSnapshot() {
-		c.state.Events.SendLifecycle(c.project, "container-snapshot-deleted",
-			fmt.Sprintf("/1.0/containers/%s", c.name), map[string]interface{}{
-				"snapshot_name": c.name,
+	if d.IsSnapshot() {
+		d.state.Events.SendLifecycle(d.project, "container-snapshot-deleted",
+			fmt.Sprintf("/1.0/containers/%s", d.name), map[string]interface{}{
+				"snapshot_name": d.name,
 			})
 	} else {
-		c.state.Events.SendLifecycle(c.project, "container-deleted",
-			fmt.Sprintf("/1.0/containers/%s", c.name), nil)
+		d.state.Events.SendLifecycle(d.project, "container-deleted",
+			fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 	}
 
 	return nil
 }
 
 // Rename renames the instance.
-func (c *lxc) Rename(newName string) error {
-	oldName := c.Name()
+func (d *lxc) Rename(newName string) error {
+	oldName := d.Name()
 	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate,
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate,
 		"newname":   newName}
 
 	logger.Info("Renaming container", ctxMap)
 
 	// Sanity checks.
-	err := instance.ValidName(newName, c.IsSnapshot())
+	err := instance.ValidName(newName, d.IsSnapshot())
 	if err != nil {
 		return err
 	}
 
-	if c.IsRunning() {
+	if d.IsRunning() {
 		return fmt.Errorf("Renaming of running container not allowed")
 	}
 
 	// Clean things up.
-	c.cleanup()
+	d.cleanup()
 
-	pool, err := storagePools.GetPoolByInstance(c.state, c)
+	pool, err := storagePools.GetPoolByInstance(d.state, d)
 	if err != nil {
 		return errors.Wrap(err, "Load instance storage pool")
 	}
 
-	if c.IsSnapshot() {
+	if d.IsSnapshot() {
 		_, newSnapName, _ := shared.InstanceGetParentAndSnapshotName(newName)
-		err = pool.RenameInstanceSnapshot(c, newSnapName, nil)
+		err = pool.RenameInstanceSnapshot(d, newSnapName, nil)
 		if err != nil {
 			return errors.Wrap(err, "Rename instance snapshot")
 		}
 	} else {
-		err = pool.RenameInstance(c, newName, nil)
+		err = pool.RenameInstance(d, newName, nil)
 		if err != nil {
 			return errors.Wrap(err, "Rename instance")
 		}
 	}
 
-	if !c.IsSnapshot() {
+	if !d.IsSnapshot() {
 		// Rename all the instance snapshot database entries.
-		results, err := c.state.Cluster.GetInstanceSnapshotsNames(c.project, oldName)
+		results, err := d.state.Cluster.GetInstanceSnapshotsNames(d.project, oldName)
 		if err != nil {
 			logger.Error("Failed to get container snapshots", ctxMap)
 			return err
@@ -3649,8 +3649,8 @@ func (c *lxc) Rename(newName string) error {
 			// Rename the snapshot.
 			oldSnapName := strings.SplitN(sname, shared.SnapshotDelimiter, 2)[1]
 			baseSnapName := filepath.Base(sname)
-			err := c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-				return tx.RenameInstanceSnapshot(c.project, oldName, oldSnapName, baseSnapName)
+			err := d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+				return tx.RenameInstanceSnapshot(d.project, oldName, oldSnapName, baseSnapName)
 			})
 			if err != nil {
 				logger.Error("Failed renaming snapshot", ctxMap)
@@ -3660,14 +3660,14 @@ func (c *lxc) Rename(newName string) error {
 	}
 
 	// Rename the instance database entry.
-	err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		if c.IsSnapshot() {
+	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		if d.IsSnapshot() {
 			oldParts := strings.SplitN(oldName, shared.SnapshotDelimiter, 2)
 			newParts := strings.SplitN(newName, shared.SnapshotDelimiter, 2)
-			return tx.RenameInstanceSnapshot(c.project, oldParts[0], oldParts[1], newParts[1])
+			return tx.RenameInstanceSnapshot(d.project, oldParts[0], oldParts[1], newParts[1])
 		}
 
-		return tx.RenameInstance(c.project, oldName, newName)
+		return tx.RenameInstance(d.project, oldName, newName)
 	})
 	if err != nil {
 		logger.Error("Failed renaming container", ctxMap)
@@ -3675,10 +3675,10 @@ func (c *lxc) Rename(newName string) error {
 	}
 
 	// Rename the logging path.
-	newFullName := project.Instance(c.Project(), c.Name())
+	newFullName := project.Instance(d.Project(), d.Name())
 	os.RemoveAll(shared.LogPath(newFullName))
-	if shared.PathExists(c.LogPath()) {
-		err := os.Rename(c.LogPath(), shared.LogPath(newFullName))
+	if shared.PathExists(d.LogPath()) {
+		err := os.Rename(d.LogPath(), shared.LogPath(newFullName))
 		if err != nil {
 			logger.Error("Failed renaming container", ctxMap)
 			return err
@@ -3686,8 +3686,8 @@ func (c *lxc) Rename(newName string) error {
 	}
 
 	// Rename the MAAS entry.
-	if !c.IsSnapshot() {
-		err = c.maasRename(newName)
+	if !d.IsSnapshot() {
+		err = d.maasRename(newName)
 		if err != nil {
 			return err
 		}
@@ -3697,11 +3697,11 @@ func (c *lxc) Rename(newName string) error {
 	defer revert.Fail()
 
 	// Set the new name in the struct.
-	c.name = newName
-	revert.Add(func() { c.name = oldName })
+	d.name = newName
+	revert.Add(func() { d.name = oldName })
 
 	// Rename the backups.
-	backups, err := c.Backups()
+	backups, err := d.Backups()
 	if err != nil {
 		return err
 	}
@@ -3721,31 +3721,31 @@ func (c *lxc) Rename(newName string) error {
 	}
 
 	// Invalidate the go-lxc cache.
-	if c.c != nil {
-		c.c.Release()
-		c.c = nil
+	if d.c != nil {
+		d.c.Release()
+		d.c = nil
 	}
 
-	c.cConfig = false
+	d.cConfig = false
 
 	// Update lease files.
-	network.UpdateDNSMasqStatic(c.state, "")
+	network.UpdateDNSMasqStatic(d.state, "")
 
-	err = c.UpdateBackupFile()
+	err = d.UpdateBackupFile()
 	if err != nil {
 		return err
 	}
 
 	logger.Info("Renamed container", ctxMap)
 
-	if c.IsSnapshot() {
-		c.state.Events.SendLifecycle(c.project, "container-snapshot-renamed",
+	if d.IsSnapshot() {
+		d.state.Events.SendLifecycle(d.project, "container-snapshot-renamed",
 			fmt.Sprintf("/1.0/containers/%s", oldName), map[string]interface{}{
 				"new_name":      newName,
 				"snapshot_name": oldName,
 			})
 	} else {
-		c.state.Events.SendLifecycle(c.project, "container-renamed",
+		d.state.Events.SendLifecycle(d.project, "container-renamed",
 			fmt.Sprintf("/1.0/containers/%s", oldName), map[string]interface{}{
 				"new_name": newName,
 			})
@@ -3756,19 +3756,19 @@ func (c *lxc) Rename(newName string) error {
 }
 
 // CGroupSet sets a cgroup value for the instance.
-func (c *lxc) CGroupSet(key string, value string) error {
+func (d *lxc) CGroupSet(key string, value string) error {
 	// Load the go-lxc struct
-	err := c.initLXC(false)
+	err := d.initLXC(false)
 	if err != nil {
 		return err
 	}
 
 	// Make sure the container is running
-	if !c.IsRunning() {
+	if !d.IsRunning() {
 		return fmt.Errorf("Can't set cgroups on a stopped container")
 	}
 
-	err = c.c.SetCgroupItem(key, value)
+	err = d.c.SetCgroupItem(key, value)
 	if err != nil {
 		return fmt.Errorf("Failed to set cgroup %s=\"%s\": %s", key, value, err)
 	}
@@ -3777,7 +3777,7 @@ func (c *lxc) CGroupSet(key string, value string) error {
 }
 
 // VolatileSet sets volatile config.
-func (c *lxc) VolatileSet(changes map[string]string) error {
+func (d *lxc) VolatileSet(changes map[string]string) error {
 	// Sanity check
 	for key := range changes {
 		if !strings.HasPrefix(key, "volatile.") {
@@ -3787,13 +3787,13 @@ func (c *lxc) VolatileSet(changes map[string]string) error {
 
 	// Update the database
 	var err error
-	if c.IsSnapshot() {
-		err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-			return tx.UpdateInstanceSnapshotConfig(c.id, changes)
+	if d.IsSnapshot() {
+		err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.UpdateInstanceSnapshotConfig(d.id, changes)
 		})
 	} else {
-		err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-			return tx.UpdateInstanceConfig(c.id, changes)
+		err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.UpdateInstanceConfig(d.id, changes)
 		})
 	}
 	if err != nil {
@@ -3803,27 +3803,27 @@ func (c *lxc) VolatileSet(changes map[string]string) error {
 	// Apply the change locally
 	for key, value := range changes {
 		if value == "" {
-			delete(c.expandedConfig, key)
-			delete(c.localConfig, key)
+			delete(d.expandedConfig, key)
+			delete(d.localConfig, key)
 			continue
 		}
 
-		c.expandedConfig[key] = value
-		c.localConfig[key] = value
+		d.expandedConfig[key] = value
+		d.localConfig[key] = value
 	}
 
 	return nil
 }
 
 // Update applies updated config.
-func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
+func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 	// Set sane defaults for unset keys
 	if args.Project == "" {
 		args.Project = project.Default
 	}
 
 	if args.Architecture == 0 {
-		args.Architecture = c.architecture
+		args.Architecture = d.architecture
 	}
 
 	if args.Config == nil {
@@ -3840,20 +3840,20 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 
 	if userRequested {
 		// Validate the new config
-		err := instance.ValidConfig(c.state.OS, args.Config, false, false)
+		err := instance.ValidConfig(d.state.OS, args.Config, false, false)
 		if err != nil {
 			return errors.Wrap(err, "Invalid config")
 		}
 
 		// Validate the new devices without using expanded devices validation (expensive checks disabled).
-		err = instance.ValidDevices(c.state, c.state.Cluster, c.Project(), c.Type(), args.Devices, false)
+		err = instance.ValidDevices(d.state, d.state.Cluster, d.Project(), d.Type(), args.Devices, false)
 		if err != nil {
 			return errors.Wrap(err, "Invalid devices")
 		}
 	}
 
 	// Validate the new profiles
-	profiles, err := c.state.Cluster.GetProfileNames(args.Project)
+	profiles, err := d.state.Cluster.GetProfileNames(args.Project)
 	if err != nil {
 		return errors.Wrap(err, "Failed to get profiles")
 	}
@@ -3880,50 +3880,50 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 	}
 
 	// Get a copy of the old configuration
-	oldDescription := c.Description()
+	oldDescription := d.Description()
 	oldArchitecture := 0
-	err = shared.DeepCopy(&c.architecture, &oldArchitecture)
+	err = shared.DeepCopy(&d.architecture, &oldArchitecture)
 	if err != nil {
 		return err
 	}
 
 	oldEphemeral := false
-	err = shared.DeepCopy(&c.ephemeral, &oldEphemeral)
+	err = shared.DeepCopy(&d.ephemeral, &oldEphemeral)
 	if err != nil {
 		return err
 	}
 
 	oldExpandedDevices := deviceConfig.Devices{}
-	err = shared.DeepCopy(&c.expandedDevices, &oldExpandedDevices)
+	err = shared.DeepCopy(&d.expandedDevices, &oldExpandedDevices)
 	if err != nil {
 		return err
 	}
 
 	oldExpandedConfig := map[string]string{}
-	err = shared.DeepCopy(&c.expandedConfig, &oldExpandedConfig)
+	err = shared.DeepCopy(&d.expandedConfig, &oldExpandedConfig)
 	if err != nil {
 		return err
 	}
 
 	oldLocalDevices := deviceConfig.Devices{}
-	err = shared.DeepCopy(&c.localDevices, &oldLocalDevices)
+	err = shared.DeepCopy(&d.localDevices, &oldLocalDevices)
 	if err != nil {
 		return err
 	}
 
 	oldLocalConfig := map[string]string{}
-	err = shared.DeepCopy(&c.localConfig, &oldLocalConfig)
+	err = shared.DeepCopy(&d.localConfig, &oldLocalConfig)
 	if err != nil {
 		return err
 	}
 
 	oldProfiles := []string{}
-	err = shared.DeepCopy(&c.profiles, &oldProfiles)
+	err = shared.DeepCopy(&d.profiles, &oldProfiles)
 	if err != nil {
 		return err
 	}
 
-	oldExpiryDate := c.expiryDate
+	oldExpiryDate := d.expiryDate
 
 	// Define a function which reverts everything.  Defer this function
 	// so that it doesn't need to be explicitly called in every failing
@@ -3932,41 +3932,41 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 	undoChanges := true
 	defer func() {
 		if undoChanges {
-			c.description = oldDescription
-			c.architecture = oldArchitecture
-			c.ephemeral = oldEphemeral
-			c.expandedConfig = oldExpandedConfig
-			c.expandedDevices = oldExpandedDevices
-			c.localConfig = oldLocalConfig
-			c.localDevices = oldLocalDevices
-			c.profiles = oldProfiles
-			c.expiryDate = oldExpiryDate
-			if c.c != nil {
-				c.c.Release()
-				c.c = nil
-			}
-			c.cConfig = false
-			c.initLXC(true)
-			cgroup.TaskSchedulerTrigger("container", c.name, "changed")
+			d.description = oldDescription
+			d.architecture = oldArchitecture
+			d.ephemeral = oldEphemeral
+			d.expandedConfig = oldExpandedConfig
+			d.expandedDevices = oldExpandedDevices
+			d.localConfig = oldLocalConfig
+			d.localDevices = oldLocalDevices
+			d.profiles = oldProfiles
+			d.expiryDate = oldExpiryDate
+			if d.c != nil {
+				d.c.Release()
+				d.c = nil
+			}
+			d.cConfig = false
+			d.initLXC(true)
+			cgroup.TaskSchedulerTrigger("container", d.name, "changed")
 		}
 	}()
 
 	// Apply the various changes
-	c.description = args.Description
-	c.architecture = args.Architecture
-	c.ephemeral = args.Ephemeral
-	c.localConfig = args.Config
-	c.localDevices = args.Devices
-	c.profiles = args.Profiles
-	c.expiryDate = args.ExpiryDate
+	d.description = args.Description
+	d.architecture = args.Architecture
+	d.ephemeral = args.Ephemeral
+	d.localConfig = args.Config
+	d.localDevices = args.Devices
+	d.profiles = args.Profiles
+	d.expiryDate = args.ExpiryDate
 
 	// Expand the config and refresh the LXC config
-	err = c.expandConfig(nil)
+	err = d.expandConfig(nil)
 	if err != nil {
 		return errors.Wrap(err, "Expand config")
 	}
 
-	err = c.expandDevices(nil)
+	err = d.expandDevices(nil)
 	if err != nil {
 		return errors.Wrap(err, "Expand devices")
 	}
@@ -3974,15 +3974,15 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 	// Diff the configurations
 	changedConfig := []string{}
 	for key := range oldExpandedConfig {
-		if oldExpandedConfig[key] != c.expandedConfig[key] {
+		if oldExpandedConfig[key] != d.expandedConfig[key] {
 			if !shared.StringInSlice(key, changedConfig) {
 				changedConfig = append(changedConfig, key)
 			}
 		}
 	}
 
-	for key := range c.expandedConfig {
-		if oldExpandedConfig[key] != c.expandedConfig[key] {
+	for key := range d.expandedConfig {
+		if oldExpandedConfig[key] != d.expandedConfig[key] {
 			if !shared.StringInSlice(key, changedConfig) {
 				changedConfig = append(changedConfig, key)
 			}
@@ -3990,17 +3990,17 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 	}
 
 	// Diff the devices
-	removeDevices, addDevices, updateDevices, updateDiff := oldExpandedDevices.Update(c.expandedDevices, func(oldDevice deviceConfig.Device, newDevice deviceConfig.Device) []string {
+	removeDevices, addDevices, updateDevices, updateDiff := oldExpandedDevices.Update(d.expandedDevices, func(oldDevice deviceConfig.Device, newDevice deviceConfig.Device) []string {
 		// This function needs to return a list of fields that are excluded from differences
 		// between oldDevice and newDevice. The result of this is that as long as the
 		// devices are otherwise identical except for the fields returned here, then the
 		// device is considered to be being "updated" rather than "added & removed".
-		oldNICType, err := nictype.NICType(c.state, c.Project(), newDevice)
+		oldNICType, err := nictype.NICType(d.state, d.Project(), newDevice)
 		if err != nil {
 			return []string{} // Cannot hot-update due to config error.
 		}
 
-		newNICType, err := nictype.NICType(c.state, c.Project(), oldDevice)
+		newNICType, err := nictype.NICType(d.state, d.Project(), oldDevice)
 		if err != nil {
 			return []string{} // Cannot hot-update due to config error.
 		}
@@ -4009,7 +4009,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 			return []string{} // Device types aren't the same, so this cannot be an update.
 		}
 
-		dev, err := device.New(c, c.state, "", newDevice, nil, nil)
+		dev, err := device.New(d, d.state, "", newDevice, nil, nil)
 		if err != nil {
 			return []string{} // Couldn't create Device, so this cannot be an update.
 		}
@@ -4019,13 +4019,13 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 
 	if userRequested {
 		// Do some validation of the config diff
-		err = instance.ValidConfig(c.state.OS, c.expandedConfig, false, true)
+		err = instance.ValidConfig(d.state.OS, d.expandedConfig, false, true)
 		if err != nil {
 			return errors.Wrap(err, "Invalid expanded config")
 		}
 
 		// Do full expanded validation of the devices diff.
-		err = instance.ValidDevices(c.state, c.state.Cluster, c.Project(), c.Type(), c.expandedDevices, true)
+		err = instance.ValidDevices(d.state, d.state.Cluster, d.Project(), d.Type(), d.expandedDevices, true)
 		if err != nil {
 			return errors.Wrap(err, "Invalid expanded devices")
 		}
@@ -4033,26 +4033,26 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 
 	// Run through initLXC to catch anything we missed
 	if userRequested {
-		if c.c != nil {
-			c.c.Release()
-			c.c = nil
+		if d.c != nil {
+			d.c.Release()
+			d.c = nil
 		}
 
-		c.cConfig = false
-		err = c.initLXC(true)
+		d.cConfig = false
+		err = d.initLXC(true)
 		if err != nil {
 			return errors.Wrap(err, "Initialize LXC")
 		}
 	}
 
-	cg, err := c.cgroup(nil)
+	cg, err := d.cgroup(nil)
 	if err != nil {
 		return err
 	}
 
 	// If apparmor changed, re-validate the apparmor profile
 	if shared.StringInSlice("raw.apparmor", changedConfig) || shared.StringInSlice("security.nesting", changedConfig) {
-		err = apparmor.InstanceParse(c.state, c)
+		err = apparmor.InstanceParse(d.state, d)
 		if err != nil {
 			return errors.Wrap(err, "Parse AppArmor profile")
 		}
@@ -4061,15 +4061,15 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 	if shared.StringInSlice("security.idmap.isolated", changedConfig) || shared.StringInSlice("security.idmap.base", changedConfig) || shared.StringInSlice("security.idmap.size", changedConfig) || shared.StringInSlice("raw.idmap", changedConfig) || shared.StringInSlice("security.privileged", changedConfig) {
 		var idmap *idmap.IdmapSet
 		base := int64(0)
-		if !c.IsPrivileged() {
+		if !d.IsPrivileged() {
 			// update the idmap
 			idmap, base, err = findIdmap(
-				c.state,
-				c.Name(),
-				c.expandedConfig["security.idmap.isolated"],
-				c.expandedConfig["security.idmap.base"],
-				c.expandedConfig["security.idmap.size"],
-				c.expandedConfig["raw.idmap"],
+				d.state,
+				d.Name(),
+				d.expandedConfig["security.idmap.isolated"],
+				d.expandedConfig["security.idmap.base"],
+				d.expandedConfig["security.idmap.size"],
+				d.expandedConfig["raw.idmap"],
 			)
 			if err != nil {
 				return errors.Wrap(err, "Failed to get ID map")
@@ -4086,17 +4086,17 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 		} else {
 			jsonIdmap = "[]"
 		}
-		c.localConfig["volatile.idmap.next"] = jsonIdmap
-		c.localConfig["volatile.idmap.base"] = fmt.Sprintf("%v", base)
+		d.localConfig["volatile.idmap.next"] = jsonIdmap
+		d.localConfig["volatile.idmap.base"] = fmt.Sprintf("%v", base)
 
 		// Invalid idmap cache
-		c.idmapset = nil
+		d.idmapset = nil
 	}
 
-	isRunning := c.IsRunning()
+	isRunning := d.IsRunning()
 
 	// Use the device interface to apply update changes.
-	err = c.updateDevices(removeDevices, addDevices, updateDevices, oldExpandedDevices, isRunning, userRequested)
+	err = d.updateDevices(removeDevices, addDevices, updateDevices, oldExpandedDevices, isRunning, userRequested)
 	if err != nil {
 		return err
 	}
@@ -4110,8 +4110,8 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 		}
 	}
 
-	if !c.IsSnapshot() && updateMAAS {
-		err = c.maasUpdate(oldExpandedDevices.CloneNative())
+	if !d.IsSnapshot() && updateMAAS {
+		err = d.maasUpdate(oldExpandedDevices.CloneNative())
 		if err != nil {
 			return err
 		}
@@ -4121,27 +4121,27 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 	if isRunning {
 		// Live update the container config
 		for _, key := range changedConfig {
-			value := c.expandedConfig[key]
+			value := d.expandedConfig[key]
 
 			if key == "raw.apparmor" || key == "security.nesting" {
 				// Update the AppArmor profile
-				err = apparmor.InstanceLoad(c.state, c)
+				err = apparmor.InstanceLoad(d.state, d)
 				if err != nil {
 					return err
 				}
 			} else if key == "security.devlxd" {
 				if value == "" || shared.IsTrue(value) {
-					err = c.insertMount(shared.VarPath("devlxd"), "/dev/lxd", "none", unix.MS_BIND, false)
+					err = d.insertMount(shared.VarPath("devlxd"), "/dev/lxd", "none", unix.MS_BIND, false)
 					if err != nil {
 						return err
 					}
-				} else if c.FileExists("/dev/lxd") == nil {
-					err = c.removeMount("/dev/lxd")
+				} else if d.FileExists("/dev/lxd") == nil {
+					err = d.removeMount("/dev/lxd")
 					if err != nil {
 						return err
 					}
 
-					err = c.FileRemove("/dev/lxd")
+					err = d.FileRemove("/dev/lxd")
 					if err != nil {
 						return err
 					}
@@ -4155,12 +4155,12 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 					}
 				}
 			} else if key == "limits.disk.priority" {
-				if !c.state.OS.CGInfo.Supports(cgroup.Blkio, cg) {
+				if !d.state.OS.CGInfo.Supports(cgroup.Blkio, cg) {
 					continue
 				}
 
 				priorityInt := 5
-				diskPriority := c.expandedConfig["limits.disk.priority"]
+				diskPriority := d.expandedConfig["limits.disk.priority"]
 				if diskPriority != "" {
 					priorityInt, err = strconv.Atoi(diskPriority)
 					if err != nil {
@@ -4180,14 +4180,14 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 				}
 			} else if key == "limits.memory" || strings.HasPrefix(key, "limits.memory.") {
 				// Skip if no memory CGroup
-				if !c.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
+				if !d.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
 					continue
 				}
 
 				// Set the new memory limit
-				memory := c.expandedConfig["limits.memory"]
-				memoryEnforce := c.expandedConfig["limits.memory.enforce"]
-				memorySwap := c.expandedConfig["limits.memory.swap"]
+				memory := d.expandedConfig["limits.memory"]
+				memoryEnforce := d.expandedConfig["limits.memory.enforce"]
+				memorySwap := d.expandedConfig["limits.memory.swap"]
 				var memoryInt int64
 
 				// Parse memory
@@ -4214,7 +4214,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 
 				// Store the old values for revert
 				oldMemswLimit := int64(-1)
-				if c.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) {
+				if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) {
 					oldMemswLimit, err = cg.GetMemorySwapLimit()
 					if err != nil {
 						oldMemswLimit = -1
@@ -4245,7 +4245,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 				}
 
 				// Reset everything
-				if c.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) {
+				if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) {
 					err = cg.SetMemorySwapLimit(-1)
 					if err != nil {
 						revertMemory()
@@ -4274,7 +4274,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 						return err
 					}
 				} else {
-					if c.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) && (memorySwap == "" || shared.IsTrue(memorySwap)) {
+					if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) && (memorySwap == "" || shared.IsTrue(memorySwap)) {
 						err = cg.SetMemoryLimit(memoryInt)
 						if err != nil {
 							revertMemory()
@@ -4302,14 +4302,14 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 					}
 				}
 
-				if !c.state.OS.CGInfo.Supports(cgroup.MemorySwappiness, cg) {
+				if !d.state.OS.CGInfo.Supports(cgroup.MemorySwappiness, cg) {
 					continue
 				}
 
 				// Configure the swappiness
 				if key == "limits.memory.swap" || key == "limits.memory.swap.priority" {
-					memorySwap := c.expandedConfig["limits.memory.swap"]
-					memorySwapPriority := c.expandedConfig["limits.memory.swap.priority"]
+					memorySwap := d.expandedConfig["limits.memory.swap"]
+					memorySwapPriority := d.expandedConfig["limits.memory.swap.priority"]
 					if memorySwap != "" && !shared.IsTrue(memorySwap) {
 						err = cg.SetMemorySwappiness(0)
 						if err != nil {
@@ -4331,21 +4331,21 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 					}
 				}
 			} else if key == "limits.network.priority" {
-				err := c.setNetworkPriority()
+				err := d.setNetworkPriority()
 				if err != nil {
 					return err
 				}
 			} else if key == "limits.cpu" {
 				// Trigger a scheduler re-run
-				cgroup.TaskSchedulerTrigger("container", c.name, "changed")
+				cgroup.TaskSchedulerTrigger("container", d.name, "changed")
 			} else if key == "limits.cpu.priority" || key == "limits.cpu.allowance" {
 				// Skip if no cpu CGroup
-				if !c.state.OS.CGInfo.Supports(cgroup.CPU, cg) {
+				if !d.state.OS.CGInfo.Supports(cgroup.CPU, cg) {
 					continue
 				}
 
 				// Apply new CPU limits
-				cpuShares, cpuCfsQuota, cpuCfsPeriod, err := cgroup.ParseCPU(c.expandedConfig["limits.cpu.allowance"], c.expandedConfig["limits.cpu.priority"])
+				cpuShares, cpuCfsQuota, cpuCfsPeriod, err := cgroup.ParseCPU(d.expandedConfig["limits.cpu.allowance"], d.expandedConfig["limits.cpu.priority"])
 				if err != nil {
 					return err
 				}
@@ -4360,7 +4360,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 					return err
 				}
 			} else if key == "limits.processes" {
-				if !c.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
+				if !d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
 					continue
 				}
 
@@ -4381,7 +4381,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 					}
 				}
 			} else if strings.HasPrefix(key, "limits.hugepages.") {
-				if !c.state.OS.CGInfo.Supports(cgroup.Hugetlb, cg) {
+				if !d.state.OS.CGInfo.Supports(cgroup.Hugetlb, cg) {
 					continue
 				}
 
@@ -4415,34 +4415,34 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 	}
 
 	// Finally, apply the changes to the database
-	err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		// Snapshots should update only their descriptions and expiry date.
-		if c.IsSnapshot() {
-			return tx.UpdateInstanceSnapshot(c.id, c.description, c.expiryDate)
+		if d.IsSnapshot() {
+			return tx.UpdateInstanceSnapshot(d.id, d.description, d.expiryDate)
 		}
 
-		object, err := tx.GetInstance(c.project, c.name)
+		object, err := tx.GetInstance(d.project, d.name)
 		if err != nil {
 			return err
 		}
 
-		object.Description = c.description
-		object.Architecture = c.architecture
-		object.Ephemeral = c.ephemeral
-		object.ExpiryDate = c.expiryDate
-		object.Config = c.localConfig
-		object.Profiles = c.profiles
-		object.Devices = c.localDevices.CloneNative()
+		object.Description = d.description
+		object.Architecture = d.architecture
+		object.Ephemeral = d.ephemeral
+		object.ExpiryDate = d.expiryDate
+		object.Config = d.localConfig
+		object.Profiles = d.profiles
+		object.Devices = d.localDevices.CloneNative()
 
-		return tx.UpdateInstance(c.project, c.name, *object)
+		return tx.UpdateInstance(d.project, d.name, *object)
 	})
 	if err != nil {
 		return errors.Wrap(err, "Failed to update database")
 	}
 
 	// Only update the backup file if it already exists (indicating the instance is mounted).
-	if shared.PathExists(filepath.Join(c.Path(), "backup.yaml")) {
-		err := c.UpdateBackupFile()
+	if shared.PathExists(filepath.Join(d.Path(), "backup.yaml")) {
+		err := d.UpdateBackupFile()
 		if err != nil && !os.IsNotExist(err) {
 			return errors.Wrap(err, "Failed to write backup file")
 		}
@@ -4459,10 +4459,10 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 			msg := map[string]string{
 				"key":       key,
 				"old_value": oldExpandedConfig[key],
-				"value":     c.expandedConfig[key],
+				"value":     d.expandedConfig[key],
 			}
 
-			err = c.devlxdEventSend("config", msg)
+			err = d.devlxdEventSend("config", msg)
 			if err != nil {
 				return err
 			}
@@ -4476,7 +4476,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 				"config": m,
 			}
 
-			err = c.devlxdEventSend("device", msg)
+			err = d.devlxdEventSend("device", msg)
 			if err != nil {
 				return err
 			}
@@ -4489,7 +4489,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 				"config": m,
 			}
 
-			err = c.devlxdEventSend("device", msg)
+			err = d.devlxdEventSend("device", msg)
 			if err != nil {
 				return err
 			}
@@ -4502,7 +4502,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 				"config": m,
 			}
 
-			err = c.devlxdEventSend("device", msg)
+			err = d.devlxdEventSend("device", msg)
 			if err != nil {
 				return err
 			}
@@ -4514,23 +4514,23 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
 
 	var endpoint string
 
-	if c.IsSnapshot() {
-		cName, sName, _ := shared.InstanceGetParentAndSnapshotName(c.name)
+	if d.IsSnapshot() {
+		cName, sName, _ := shared.InstanceGetParentAndSnapshotName(d.name)
 		endpoint = fmt.Sprintf("/1.0/containers/%s/snapshots/%s", cName, sName)
 	} else {
-		endpoint = fmt.Sprintf("/1.0/containers/%s", c.name)
+		endpoint = fmt.Sprintf("/1.0/containers/%s", d.name)
 	}
 
-	c.state.Events.SendLifecycle(c.project, "container-updated", endpoint, nil)
+	d.state.Events.SendLifecycle(d.project, "container-updated", endpoint, nil)
 
 	return nil
 }
 
-func (c *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices deviceConfig.Devices, updateDevices deviceConfig.Devices, oldExpandedDevices deviceConfig.Devices, instanceRunning bool, userRequested bool) error {
+func (d *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices deviceConfig.Devices, updateDevices deviceConfig.Devices, oldExpandedDevices deviceConfig.Devices, instanceRunning bool, userRequested bool) error {
 	// Remove devices in reverse order to how they were added.
 	for _, dev := range removeDevices.Reversed() {
 		if instanceRunning {
-			err := c.deviceStop(dev.Name, dev.Config, instanceRunning, "")
+			err := d.deviceStop(dev.Name, dev.Config, instanceRunning, "")
 			if err == device.ErrUnsupportedDevType {
 				continue // No point in trying to remove device below.
 			} else if err != nil {
@@ -4538,7 +4538,7 @@ func (c *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices devic
 			}
 		}
 
-		err := c.deviceRemove(dev.Name, dev.Config, instanceRunning)
+		err := d.deviceRemove(dev.Name, dev.Config, instanceRunning)
 		if err != nil && err != device.ErrUnsupportedDevType {
 			return errors.Wrapf(err, "Failed to remove device %q", dev.Name)
 		}
@@ -4546,7 +4546,7 @@ func (c *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices devic
 		// Check whether we are about to add the same device back with updated config and
 		// if not, or if the device type has changed, then remove all volatile keys for
 		// this device (as its an actual removal or a device type change).
-		err = c.deviceResetVolatile(dev.Name, dev.Config, addDevices[dev.Name])
+		err = d.deviceResetVolatile(dev.Name, dev.Config, addDevices[dev.Name])
 		if err != nil {
 			return errors.Wrapf(err, "Failed to reset volatile data for device %q", dev.Name)
 		}
@@ -4554,7 +4554,7 @@ func (c *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices devic
 
 	// Add devices in sorted order, this ensures that device mounts are added in path order.
 	for _, dev := range addDevices.Sorted() {
-		err := c.deviceAdd(dev.Name, dev.Config, instanceRunning)
+		err := d.deviceAdd(dev.Name, dev.Config, instanceRunning)
 		if err == device.ErrUnsupportedDevType {
 			continue // No point in trying to start device below.
 		} else if err != nil {
@@ -4564,12 +4564,12 @@ func (c *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices devic
 
 			// If update is non-user requested (i.e from a snapshot restore), there's nothing we can
 			// do to fix the config and we don't want to prevent the snapshot restore so log and allow.
-			logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"project": c.Project(), "instance": c.Name(), "device": dev.Name, "err": err})
+			logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"project": d.Project(), "instance": d.Name(), "device": dev.Name, "err": err})
 			continue
 		}
 
 		if instanceRunning {
-			_, err := c.deviceStart(dev.Name, dev.Config, instanceRunning)
+			_, err := d.deviceStart(dev.Name, dev.Config, instanceRunning)
 			if err != nil && err != device.ErrUnsupportedDevType {
 				return errors.Wrapf(err, "Failed to start device %q", dev.Name)
 			}
@@ -4577,7 +4577,7 @@ func (c *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices devic
 	}
 
 	for _, dev := range updateDevices.Sorted() {
-		err := c.deviceUpdate(dev.Name, dev.Config, oldExpandedDevices, instanceRunning)
+		err := d.deviceUpdate(dev.Name, dev.Config, oldExpandedDevices, instanceRunning)
 		if err != nil && err != device.ErrUnsupportedDevType {
 			return errors.Wrapf(err, "Failed to update device %q", dev.Name)
 		}
@@ -4587,32 +4587,32 @@ func (c *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices devic
 }
 
 // Export backs up the instance.
-func (c *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetadata, error) {
+func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetadata, error) {
 	ctxMap := log.Ctx{
-		"project":   c.project,
-		"name":      c.name,
-		"created":   c.creationDate,
-		"ephemeral": c.ephemeral,
-		"used":      c.lastUsedDate}
+		"project":   d.project,
+		"name":      d.name,
+		"created":   d.creationDate,
+		"ephemeral": d.ephemeral,
+		"used":      d.lastUsedDate}
 
 	meta := api.ImageMetadata{}
 
-	if c.IsRunning() {
+	if d.IsRunning() {
 		return meta, fmt.Errorf("Cannot export a running instance as an image")
 	}
 
 	logger.Info("Exporting instance", ctxMap)
 
 	// Start the storage.
-	_, err := c.mount()
+	_, err := d.mount()
 	if err != nil {
 		logger.Error("Failed exporting instance", ctxMap)
 		return meta, err
 	}
-	defer c.unmount()
+	defer d.unmount()
 
 	// Get IDMap to unshift container as the tarball is created.
-	idmap, err := c.DiskIdmap()
+	idmap, err := d.DiskIdmap()
 	if err != nil {
 		logger.Error("Failed exporting instance", ctxMap)
 		return meta, err
@@ -4622,7 +4622,7 @@ func (c *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 	tarWriter := instancewriter.NewInstanceTarWriter(w, idmap)
 
 	// Keep track of the first path we saw for each path with nlink>1.
-	cDir := c.Path()
+	cDir := d.Path()
 
 	// Path inside the tar image is the pathname starting after cDir.
 	offset := len(cDir) + 1
@@ -4654,9 +4654,9 @@ func (c *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 
 		// Get the instance's architecture.
 		var arch string
-		if c.IsSnapshot() {
-			parentName, _, _ := shared.InstanceGetParentAndSnapshotName(c.name)
-			parent, err := instance.LoadByProjectAndName(c.state, c.project, parentName)
+		if d.IsSnapshot() {
+			parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)
+			parent, err := instance.LoadByProjectAndName(d.state, d.project, parentName)
 			if err != nil {
 				tarWriter.Close()
 				logger.Error("Failed exporting instance", ctxMap)
@@ -4665,11 +4665,11 @@ func (c *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 
 			arch, _ = osarch.ArchitectureName(parent.Architecture())
 		} else {
-			arch, _ = osarch.ArchitectureName(c.architecture)
+			arch, _ = osarch.ArchitectureName(d.architecture)
 		}
 
 		if arch == "" {
-			arch, err = osarch.ArchitectureName(c.state.OS.Architectures[0])
+			arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
 			if err != nil {
 				logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
@@ -4780,7 +4780,7 @@ func (c *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 	}
 
 	// Include all the rootfs files.
-	fnam = c.RootfsPath()
+	fnam = d.RootfsPath()
 	err = filepath.Walk(fnam, writeToTar)
 	if err != nil {
 		logger.Error("Failed exporting instance", ctxMap)
@@ -4788,7 +4788,7 @@ func (c *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 	}
 
 	// Include all the templates.
-	fnam = c.TemplatesPath()
+	fnam = d.TemplatesPath()
 	if shared.PathExists(fnam) {
 		err = filepath.Walk(fnam, writeToTar)
 		if err != nil {
@@ -4807,9 +4807,9 @@ func (c *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 	return meta, nil
 }
 
-func collectCRIULogFile(c instance.Instance, imagesDir string, function string, method string) error {
+func collectCRIULogFile(d instance.Instance, imagesDir string, function string, method string) error {
 	t := time.Now().Format(time.RFC3339)
-	newPath := filepath.Join(c.LogPath(), fmt.Sprintf("%s_%s_%s.log", function, method, t))
+	newPath := filepath.Join(d.LogPath(), fmt.Sprintf("%s_%s_%s.log", function, method, t))
 	return shared.FileCopy(filepath.Join(imagesDir, fmt.Sprintf("%s.log", method)), newPath)
 }
 
@@ -4834,13 +4834,13 @@ func getCRIULogErrors(imagesDir string, method string) (string, error) {
 }
 
 // Migrate migrates the instance to another node.
-func (c *lxc) Migrate(args *instance.CriuMigrationArgs) error {
+func (d *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 	ctxMap := log.Ctx{
-		"project":      c.project,
-		"name":         c.name,
-		"created":      c.creationDate,
-		"ephemeral":    c.ephemeral,
-		"used":         c.lastUsedDate,
+		"project":      d.project,
+		"name":         d.name,
+		"created":      d.creationDate,
+		"ephemeral":    d.ephemeral,
+		"used":         d.lastUsedDate,
 		"statedir":     args.StateDir,
 		"actionscript": args.ActionScript,
 		"predumpdir":   args.PreDumpDir,
@@ -4869,7 +4869,7 @@ func (c *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 		logger.Warn("Unknown migrate call", log.Ctx{"cmd": args.Cmd})
 	}
 
-	pool, err := c.getStoragePool()
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return err
 	}
@@ -4892,7 +4892,7 @@ func (c *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 	 */
 	if args.Cmd == liblxc.MIGRATE_RESTORE {
 		// Run the shared start
-		_, postStartHooks, err := c.startCommon()
+		_, postStartHooks, err := d.startCommon()
 		if err != nil {
 			return errors.Wrap(err, "Failed preparing container for start")
 		}
@@ -4903,13 +4903,13 @@ func (c *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 		 * opened by the process after it is in its user
 		 * namespace.
 		 */
-		idmapset, err := c.CurrentIdmap()
+		idmapset, err := d.CurrentIdmap()
 		if err != nil {
 			return err
 		}
 
 		if idmapset != nil {
-			storageType, err := c.getStorageType()
+			storageType, err := d.getStorageType()
 			if err != nil {
 				return errors.Wrap(err, "Storage type")
 			}
@@ -4926,32 +4926,32 @@ func (c *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 			}
 		}
 
-		configPath := filepath.Join(c.LogPath(), "lxc.conf")
+		configPath := filepath.Join(d.LogPath(), "lxc.conf")
 
 		if args.DumpDir != "" {
 			finalStateDir = fmt.Sprintf("%s/%s", args.StateDir, args.DumpDir)
 		}
 
 		_, migrateErr = shared.RunCommand(
-			c.state.OS.ExecPath,
+			d.state.OS.ExecPath,
 			"forkmigrate",
-			c.name,
-			c.state.OS.LxcPath,
+			d.name,
+			d.state.OS.LxcPath,
 			configPath,
 			finalStateDir,
 			fmt.Sprintf("%v", preservesInodes))
 
 		if migrateErr == nil {
 			// Run any post start hooks.
-			err := c.runHooks(postStartHooks)
+			err := d.runHooks(postStartHooks)
 			if err != nil {
 				// Attempt to stop container.
-				c.Stop(false)
+				d.Stop(false)
 				return err
 			}
 		}
 	} else if args.Cmd == liblxc.MIGRATE_FEATURE_CHECK {
-		err := c.initLXC(true)
+		err := d.initLXC(true)
 		if err != nil {
 			return err
 		}
@@ -4959,14 +4959,14 @@ func (c *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 		opts := liblxc.MigrateOptions{
 			FeaturesToCheck: args.Features,
 		}
-		migrateErr = c.c.Migrate(args.Cmd, opts)
+		migrateErr = d.c.Migrate(args.Cmd, opts)
 		if migrateErr != nil {
 			logger.Info("CRIU feature check failed", ctxMap)
 			return migrateErr
 		}
 		return nil
 	} else {
-		err := c.initLXC(true)
+		err := d.initLXC(true)
 		if err != nil {
 			return err
 		}
@@ -5000,15 +5000,15 @@ func (c *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 			opts.PredumpDir = fmt.Sprintf("../%s", args.PreDumpDir)
 		}
 
-		if !c.IsRunning() {
+		if !d.IsRunning() {
 			// otherwise the migration will needlessly fail
 			args.Stop = false
 		}
 
-		migrateErr = c.c.Migrate(args.Cmd, opts)
+		migrateErr = d.c.Migrate(args.Cmd, opts)
 	}
 
-	collectErr := collectCRIULogFile(c, finalStateDir, args.Function, prettyCmd)
+	collectErr := collectCRIULogFile(d, finalStateDir, args.Function, prettyCmd)
 	if collectErr != nil {
 		logger.Error("Error collecting checkpoint log file", log.Ctx{"err": collectErr})
 	}
@@ -5030,8 +5030,8 @@ func (c *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 
 // DeferTemplateApply sets volatile key to apply template on next start. Used when instance's
 // volume isn't mounted.
-func (c *lxc) DeferTemplateApply(trigger string) error {
-	err := c.VolatileSet(map[string]string{"volatile.apply_template": trigger})
+func (d *lxc) DeferTemplateApply(trigger string) error {
+	err := d.VolatileSet(map[string]string{"volatile.apply_template": trigger})
 	if err != nil {
 		return errors.Wrap(err, "Failed to set apply_template volatile key")
 	}
@@ -5039,9 +5039,9 @@ func (c *lxc) DeferTemplateApply(trigger string) error {
 	return nil
 }
 
-func (c *lxc) templateApplyNow(trigger string) error {
+func (d *lxc) templateApplyNow(trigger string) error {
 	// If there's no metadata, just return
-	fname := filepath.Join(c.Path(), "metadata.yaml")
+	fname := filepath.Join(d.Path(), "metadata.yaml")
 	if !shared.PathExists(fname) {
 		return nil
 	}
@@ -5060,7 +5060,7 @@ func (c *lxc) templateApplyNow(trigger string) error {
 	}
 
 	// Find rootUID and rootGID
-	idmapset, err := c.DiskIdmap()
+	idmapset, err := d.DiskIdmap()
 	if err != nil {
 		return errors.Wrap(err, "Failed to set ID map")
 	}
@@ -5074,9 +5074,9 @@ func (c *lxc) templateApplyNow(trigger string) error {
 	}
 
 	// Figure out the container architecture
-	arch, err := osarch.ArchitectureName(c.architecture)
+	arch, err := osarch.ArchitectureName(d.architecture)
 	if err != nil {
-		arch, err = osarch.ArchitectureName(c.state.OS.Architectures[0])
+		arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
 		if err != nil {
 			return errors.Wrap(err, "Failed to detect system architecture")
 		}
@@ -5084,17 +5084,17 @@ func (c *lxc) templateApplyNow(trigger string) error {
 
 	// Generate the container metadata
 	containerMeta := make(map[string]string)
-	containerMeta["name"] = c.name
+	containerMeta["name"] = d.name
 	containerMeta["type"] = "container"
 	containerMeta["architecture"] = arch
 
-	if c.ephemeral {
+	if d.ephemeral {
 		containerMeta["ephemeral"] = "true"
 	} else {
 		containerMeta["ephemeral"] = "false"
 	}
 
-	if c.IsPrivileged() {
+	if d.IsPrivileged() {
 		containerMeta["privileged"] = "true"
 	} else {
 		containerMeta["privileged"] = "false"
@@ -5119,7 +5119,7 @@ func (c *lxc) templateApplyNow(trigger string) error {
 			}
 
 			// Open the file to template, create if needed
-			fullpath := filepath.Join(c.RootfsPath(), strings.TrimLeft(tplPath, "/"))
+			fullpath := filepath.Join(d.RootfsPath(), strings.TrimLeft(tplPath, "/"))
 			if shared.PathExists(fullpath) {
 				if tpl.CreateOnly {
 					return nil
@@ -5147,13 +5147,13 @@ func (c *lxc) templateApplyNow(trigger string) error {
 			defer w.Close()
 
 			// Read the template
-			tplString, err := ioutil.ReadFile(filepath.Join(c.TemplatesPath(), tpl.Template))
+			tplString, err := ioutil.ReadFile(filepath.Join(d.TemplatesPath(), tpl.Template))
 			if err != nil {
 				return errors.Wrap(err, "Failed to read template file")
 			}
 
 			// Restrict filesystem access to within the container's rootfs
-			tplSet := pongo2.NewSet(fmt.Sprintf("%s-%s", c.name, tpl.Template), template.ChrootLoader{Path: c.RootfsPath()})
+			tplSet := pongo2.NewSet(fmt.Sprintf("%s-%s", d.name, tpl.Template), template.ChrootLoader{Path: d.RootfsPath()})
 
 			tplRender, err := tplSet.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
 			if err != nil {
@@ -5161,7 +5161,7 @@ func (c *lxc) templateApplyNow(trigger string) error {
 			}
 
 			configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
-				val, ok := c.expandedConfig[confKey.String()]
+				val, ok := d.expandedConfig[confKey.String()]
 				if !ok {
 					return confDefault
 				}
@@ -5174,8 +5174,8 @@ func (c *lxc) templateApplyNow(trigger string) error {
 				"path":       tplPath,
 				"container":  containerMeta,
 				"instance":   containerMeta,
-				"config":     c.expandedConfig,
-				"devices":    c.expandedDevices,
+				"config":     d.expandedConfig,
+				"devices":    d.expandedDevices,
 				"properties": tpl.Properties,
 				"config_get": configGet}, w)
 
@@ -5190,9 +5190,9 @@ func (c *lxc) templateApplyNow(trigger string) error {
 	return nil
 }
 
-func (c *lxc) inheritInitPidFd() (int, *os.File) {
-	if c.state.OS.PidFds {
-		pidFdFile, err := c.InitPidFd()
+func (d *lxc) inheritInitPidFd() (int, *os.File) {
+	if d.state.OS.PidFds {
+		pidFdFile, err := d.InitPidFd()
 		if err != nil {
 			return -1, nil
 		}
@@ -5204,15 +5204,15 @@ func (c *lxc) inheritInitPidFd() (int, *os.File) {
 }
 
 // FileExists returns whether file exists inside instance.
-func (c *lxc) FileExists(path string) error {
+func (d *lxc) FileExists(path string) error {
 	// Setup container storage if needed
-	_, err := c.mount()
+	_, err := d.mount()
 	if err != nil {
 		return err
 	}
-	defer c.unmount()
+	defer d.unmount()
 
-	pidFdNr, pidFd := c.inheritInitPidFd()
+	pidFdNr, pidFd := d.inheritInitPidFd()
 	if pidFdNr >= 0 {
 		defer pidFd.Close()
 	}
@@ -5221,11 +5221,11 @@ func (c *lxc) FileExists(path string) error {
 	_, stderr, err := shared.RunCommandSplit(
 		nil,
 		[]*os.File{pidFd},
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forkfile",
 		"exists",
-		c.RootfsPath(),
-		fmt.Sprintf("%d", c.InitPID()),
+		d.RootfsPath(),
+		fmt.Sprintf("%d", d.InitPID()),
 		fmt.Sprintf("%d", pidFdNr),
 		path,
 	)
@@ -5249,21 +5249,21 @@ func (c *lxc) FileExists(path string) error {
 }
 
 // FilePull gets a file from the instance.
-func (c *lxc) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error) {
+func (d *lxc) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error) {
 	// Check for ongoing operations (that may involve shifting).
-	op := operationlock.Get(c.id)
+	op := operationlock.Get(d.id)
 	if op != nil {
 		op.Wait()
 	}
 
 	// Setup container storage if needed
-	_, err := c.mount()
+	_, err := d.mount()
 	if err != nil {
 		return -1, -1, 0, "", nil, err
 	}
-	defer c.unmount()
+	defer d.unmount()
 
-	pidFdNr, pidFd := c.inheritInitPidFd()
+	pidFdNr, pidFd := d.inheritInitPidFd()
 	if pidFdNr >= 0 {
 		defer pidFd.Close()
 	}
@@ -5272,11 +5272,11 @@ func (c *lxc) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMod
 	_, stderr, err := shared.RunCommandSplit(
 		nil,
 		[]*os.File{pidFd},
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forkfile",
 		"pull",
-		c.RootfsPath(),
-		fmt.Sprintf("%d", c.InitPID()),
+		d.RootfsPath(),
+		fmt.Sprintf("%d", d.InitPID()),
 		fmt.Sprintf("%d", pidFdNr),
 		srcpath,
 		dstpath,
@@ -5360,8 +5360,8 @@ func (c *lxc) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMod
 	}
 
 	// Unmap uid and gid if needed
-	if !c.IsRunning() {
-		idmapset, err := c.DiskIdmap()
+	if !d.IsRunning() {
+		idmapset, err := d.DiskIdmap()
 		if err != nil {
 			return -1, -1, 0, "", nil, err
 		}
@@ -5375,9 +5375,9 @@ func (c *lxc) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMod
 }
 
 // FilePush sends a file into the instance.
-func (c *lxc) FilePush(fileType string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error {
+func (d *lxc) FilePush(fileType string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error {
 	// Check for ongoing operations (that may involve shifting).
-	op := operationlock.Get(c.id)
+	op := operationlock.Get(d.id)
 	if op != nil {
 		op.Wait()
 	}
@@ -5387,8 +5387,8 @@ func (c *lxc) FilePush(fileType string, srcpath string, dstpath string, uid int6
 	var errStr string
 
 	// Map uid and gid if needed
-	if !c.IsRunning() {
-		idmapset, err := c.DiskIdmap()
+	if !d.IsRunning() {
+		idmapset, err := d.DiskIdmap()
 		if err != nil {
 			return err
 		}
@@ -5400,18 +5400,18 @@ func (c *lxc) FilePush(fileType string, srcpath string, dstpath string, uid int6
 	}
 
 	// Setup container storage if needed
-	_, err := c.mount()
+	_, err := d.mount()
 	if err != nil {
 		return err
 	}
-	defer c.unmount()
+	defer d.unmount()
 
 	defaultMode := 0640
 	if fileType == "directory" {
 		defaultMode = 0750
 	}
 
-	pidFdNr, pidFd := c.inheritInitPidFd()
+	pidFdNr, pidFd := d.inheritInitPidFd()
 	if pidFdNr >= 0 {
 		defer pidFd.Close()
 	}
@@ -5420,11 +5420,11 @@ func (c *lxc) FilePush(fileType string, srcpath string, dstpath string, uid int6
 	_, stderr, err := shared.RunCommandSplit(
 		nil,
 		[]*os.File{pidFd},
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forkfile",
 		"push",
-		c.RootfsPath(),
-		fmt.Sprintf("%d", c.InitPID()),
+		d.RootfsPath(),
+		fmt.Sprintf("%d", d.InitPID()),
 		fmt.Sprintf("%d", pidFdNr),
 		srcpath,
 		dstpath,
@@ -5468,17 +5468,17 @@ func (c *lxc) FilePush(fileType string, srcpath string, dstpath string, uid int6
 }
 
 // FileRemove removes a file inside the instance.
-func (c *lxc) FileRemove(path string) error {
+func (d *lxc) FileRemove(path string) error {
 	var errStr string
 
 	// Setup container storage if needed
-	_, err := c.mount()
+	_, err := d.mount()
 	if err != nil {
 		return err
 	}
-	defer c.unmount()
+	defer d.unmount()
 
-	pidFdNr, pidFd := c.inheritInitPidFd()
+	pidFdNr, pidFd := d.inheritInitPidFd()
 	if pidFdNr >= 0 {
 		defer pidFd.Close()
 	}
@@ -5487,11 +5487,11 @@ func (c *lxc) FileRemove(path string) error {
 	_, stderr, err := shared.RunCommandSplit(
 		nil,
 		[]*os.File{pidFd},
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forkfile",
 		"remove",
-		c.RootfsPath(),
-		fmt.Sprintf("%d", c.InitPID()),
+		d.RootfsPath(),
+		fmt.Sprintf("%d", d.InitPID()),
 		fmt.Sprintf("%d", pidFdNr),
 		path,
 	)
@@ -5526,7 +5526,7 @@ func (c *lxc) FileRemove(path string) error {
 }
 
 // Console attaches to the instance console.
-func (c *lxc) Console(protocol string) (*os.File, chan error, error) {
+func (d *lxc) Console(protocol string) (*os.File, chan error, error) {
 	if protocol != instance.ConsoleTypeConsole {
 		return nil, nil, fmt.Errorf("Container instances don't support %q output", protocol)
 	}
@@ -5534,15 +5534,15 @@ func (c *lxc) Console(protocol string) (*os.File, chan error, error) {
 	chDisconnect := make(chan error, 1)
 
 	args := []string{
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forkconsole",
-		project.Instance(c.Project(), c.Name()),
-		c.state.OS.LxcPath,
-		filepath.Join(c.LogPath(), "lxc.conf"),
+		project.Instance(d.Project(), d.Name()),
+		d.state.OS.LxcPath,
+		filepath.Join(d.LogPath(), "lxc.conf"),
 		"tty=0",
 		"escape=-1"}
 
-	idmapset, err := c.CurrentIdmap()
+	idmapset, err := d.CurrentIdmap()
 	if err != nil {
 		return nil, nil, err
 	}
@@ -5558,7 +5558,7 @@ func (c *lxc) Console(protocol string) (*os.File, chan error, error) {
 	}
 
 	cmd := exec.Cmd{}
-	cmd.Path = c.state.OS.ExecPath
+	cmd.Path = d.state.OS.ExecPath
 	cmd.Args = args
 	cmd.Stdin = pty
 	cmd.Stdout = pty
@@ -5584,8 +5584,8 @@ func (c *lxc) Console(protocol string) (*os.File, chan error, error) {
 }
 
 // ConsoleLog returns console log.
-func (c *lxc) ConsoleLog(opts liblxc.ConsoleLogOptions) (string, error) {
-	msg, err := c.c.ConsoleLog(opts)
+func (d *lxc) ConsoleLog(opts liblxc.ConsoleLogOptions) (string, error) {
+	msg, err := d.c.ConsoleLog(opts)
 	if err != nil {
 		return "", err
 	}
@@ -5594,7 +5594,7 @@ func (c *lxc) ConsoleLog(opts liblxc.ConsoleLogOptions) (string, error) {
 }
 
 // Exec executes a command inside the instance.
-func (c *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, stderr *os.File) (instance.Cmd, error) {
+func (d *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, stderr *os.File) (instance.Cmd, error) {
 	// Prepare the environment
 	envSlice := []string{}
 
@@ -5603,20 +5603,20 @@ func (c *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, st
 	}
 
 	// Setup logfile
-	logPath := filepath.Join(c.LogPath(), "forkexec.log")
+	logPath := filepath.Join(d.LogPath(), "forkexec.log")
 	logFile, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0644)
 	if err != nil {
 		return nil, err
 	}
 
 	// Prepare the subcommand
-	cname := project.Instance(c.Project(), c.Name())
+	cname := project.Instance(d.Project(), d.Name())
 	args := []string{
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forkexec",
 		cname,
-		c.state.OS.LxcPath,
-		filepath.Join(c.LogPath(), "lxc.conf"),
+		d.state.OS.LxcPath,
+		filepath.Join(d.LogPath(), "lxc.conf"),
 		req.Cwd,
 		fmt.Sprintf("%d", req.User),
 		fmt.Sprintf("%d", req.Group),
@@ -5631,7 +5631,7 @@ func (c *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, st
 	args = append(args, req.Command...)
 
 	cmd := exec.Cmd{}
-	cmd.Path = c.state.OS.ExecPath
+	cmd.Path = d.state.OS.ExecPath
 	cmd.Args = args
 
 	cmd.Stdin = nil
@@ -5640,14 +5640,14 @@ func (c *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, st
 
 	// Mitigation for CVE-2019-5736
 	useRexec := false
-	if c.expandedConfig["raw.idmap"] != "" {
-		err := instance.AllowedUnprivilegedOnlyMap(c.expandedConfig["raw.idmap"])
+	if d.expandedConfig["raw.idmap"] != "" {
+		err := instance.AllowedUnprivilegedOnlyMap(d.expandedConfig["raw.idmap"])
 		if err != nil {
 			useRexec = true
 		}
 	}
 
-	if shared.IsTrue(c.expandedConfig["security.privileged"]) {
+	if shared.IsTrue(d.expandedConfig["security.privileged"]) {
 		useRexec = true
 	}
 
@@ -5685,16 +5685,16 @@ func (c *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, st
 	return instCmd, nil
 }
 
-func (c *lxc) cpuState() api.InstanceStateCPU {
+func (d *lxc) cpuState() api.InstanceStateCPU {
 	cpu := api.InstanceStateCPU{}
 
 	// CPU usage in seconds
-	cg, err := c.cgroup(nil)
+	cg, err := d.cgroup(nil)
 	if err != nil {
 		return cpu
 	}
 
-	if !c.state.OS.CGInfo.Supports(cgroup.CPUAcct, cg) {
+	if !d.state.OS.CGInfo.Supports(cgroup.CPUAcct, cg) {
 		return cpu
 	}
 
@@ -5709,10 +5709,10 @@ func (c *lxc) cpuState() api.InstanceStateCPU {
 	return cpu
 }
 
-func (c *lxc) diskState() map[string]api.InstanceStateDisk {
+func (d *lxc) diskState() map[string]api.InstanceStateDisk {
 	disk := map[string]api.InstanceStateDisk{}
 
-	for _, dev := range c.expandedDevices.Sorted() {
+	for _, dev := range d.expandedDevices.Sorted() {
 		if dev.Config["type"] != "disk" {
 			continue
 		}
@@ -5720,30 +5720,30 @@ func (c *lxc) diskState() map[string]api.InstanceStateDisk {
 		var usage int64
 
 		if dev.Config["path"] == "/" {
-			pool, err := storagePools.GetPoolByInstance(c.state, c)
+			pool, err := storagePools.GetPoolByInstance(d.state, d)
 			if err != nil {
-				logger.Error("Error loading storage pool", log.Ctx{"project": c.Project(), "instance": c.Name(), "err": err})
+				logger.Error("Error loading storage pool", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 				continue
 			}
 
-			usage, err = pool.GetInstanceUsage(c)
+			usage, err = pool.GetInstanceUsage(d)
 			if err != nil {
 				if err != storageDrivers.ErrNotSupported {
-					logger.Error("Error getting disk usage", log.Ctx{"project": c.Project(), "instance": c.Name(), "err": err})
+					logger.Error("Error getting disk usage", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 				}
 				continue
 			}
 		} else if dev.Config["pool"] != "" {
-			pool, err := storagePools.GetPoolByName(c.state, dev.Config["pool"])
+			pool, err := storagePools.GetPoolByName(d.state, dev.Config["pool"])
 			if err != nil {
-				logger.Error("Error loading storage pool", log.Ctx{"project": c.Project(), "poolName": dev.Config["pool"], "err": err})
+				logger.Error("Error loading storage pool", log.Ctx{"project": d.Project(), "poolName": dev.Config["pool"], "err": err})
 				continue
 			}
 
-			usage, err = pool.GetCustomVolumeUsage(c.Project(), dev.Config["source"])
+			usage, err = pool.GetCustomVolumeUsage(d.Project(), dev.Config["source"])
 			if err != nil {
 				if err != storageDrivers.ErrNotSupported {
-					logger.Error("Error getting volume usage", log.Ctx{"project": c.Project(), "volume": dev.Config["source"], "err": err})
+					logger.Error("Error getting volume usage", log.Ctx{"project": d.Project(), "volume": dev.Config["source"], "err": err})
 				}
 				continue
 			}
@@ -5757,14 +5757,14 @@ func (c *lxc) diskState() map[string]api.InstanceStateDisk {
 	return disk
 }
 
-func (c *lxc) memoryState() api.InstanceStateMemory {
+func (d *lxc) memoryState() api.InstanceStateMemory {
 	memory := api.InstanceStateMemory{}
-	cg, err := c.cgroup(nil)
+	cg, err := d.cgroup(nil)
 	if err != nil {
 		return memory
 	}
 
-	if !c.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
+	if !d.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
 		return memory
 	}
 
@@ -5775,14 +5775,14 @@ func (c *lxc) memoryState() api.InstanceStateMemory {
 	}
 
 	// Memory peak in bytes
-	if c.state.OS.CGInfo.Supports(cgroup.MemoryMaxUsage, cg) {
+	if d.state.OS.CGInfo.Supports(cgroup.MemoryMaxUsage, cg) {
 		value, err = cg.GetMemoryMaxUsage()
 		if err == nil {
 			memory.UsagePeak = value
 		}
 	}
 
-	if c.state.OS.CGInfo.Supports(cgroup.MemorySwapUsage, cg) {
+	if d.state.OS.CGInfo.Supports(cgroup.MemorySwapUsage, cg) {
 		// Swap in bytes
 		if memory.Usage > 0 {
 			value, err := cg.GetMemorySwapUsage()
@@ -5803,27 +5803,27 @@ func (c *lxc) memoryState() api.InstanceStateMemory {
 	return memory
 }
 
-func (c *lxc) networkState() map[string]api.InstanceStateNetwork {
+func (d *lxc) networkState() map[string]api.InstanceStateNetwork {
 	result := map[string]api.InstanceStateNetwork{}
 
-	pid := c.InitPID()
+	pid := d.InitPID()
 	if pid < 1 {
 		return result
 	}
 
-	couldUseNetnsGetifaddrs := c.state.OS.NetnsGetifaddrs
+	couldUseNetnsGetifaddrs := d.state.OS.NetnsGetifaddrs
 	if couldUseNetnsGetifaddrs {
 		nw, err := netutils.NetnsGetifaddrs(int32(pid))
 		if err != nil {
 			couldUseNetnsGetifaddrs = false
-			logger.Error("Failed to retrieve network information via netlink", log.Ctx{"container": c.name, "pid": pid})
+			logger.Error("Failed to retrieve network information via netlink", log.Ctx{"container": d.name, "pid": pid})
 		} else {
 			result = nw
 		}
 	}
 
 	if !couldUseNetnsGetifaddrs {
-		pidFdNr, pidFd := c.inheritInitPidFd()
+		pidFdNr, pidFd := d.inheritInitPidFd()
 		if pidFdNr >= 0 {
 			defer pidFd.Close()
 		}
@@ -5832,7 +5832,7 @@ func (c *lxc) networkState() map[string]api.InstanceStateNetwork {
 		out, _, err := shared.RunCommandSplit(
 			nil,
 			[]*os.File{pidFd},
-			c.state.OS.ExecPath,
+			d.state.OS.ExecPath,
 			"forknet",
 			"info",
 			"--",
@@ -5841,19 +5841,19 @@ func (c *lxc) networkState() map[string]api.InstanceStateNetwork {
 
 		// Process forkgetnet response
 		if err != nil {
-			logger.Error("Error calling 'lxd forknet", log.Ctx{"container": c.name, "err": err, "pid": pid})
+			logger.Error("Error calling 'lxd forknet", log.Ctx{"container": d.name, "err": err, "pid": pid})
 			return result
 		}
 
 		// If we can use netns_getifaddrs() but it failed and the setns() +
 		// netns_getifaddrs() succeeded we should just always fallback to the
 		// setns() + netns_getifaddrs() style retrieval.
-		c.state.OS.NetnsGetifaddrs = false
+		d.state.OS.NetnsGetifaddrs = false
 
 		nw := map[string]api.InstanceStateNetwork{}
 		err = json.Unmarshal([]byte(out), &nw)
 		if err != nil {
-			logger.Error("Failure to read forknet json", log.Ctx{"container": c.name, "err": err})
+			logger.Error("Failure to read forknet json", log.Ctx{"container": d.name, "err": err})
 			return result
 		}
 		result = nw
@@ -5862,7 +5862,7 @@ func (c *lxc) networkState() map[string]api.InstanceStateNetwork {
 	// Get host_name from volatile data if not set already.
 	for name, dev := range result {
 		if dev.HostName == "" {
-			dev.HostName = c.localConfig[fmt.Sprintf("volatile.%s.host_name", name)]
+			dev.HostName = d.localConfig[fmt.Sprintf("volatile.%s.host_name", name)]
 			result[name] = dev
 		}
 	}
@@ -5870,19 +5870,19 @@ func (c *lxc) networkState() map[string]api.InstanceStateNetwork {
 	return result
 }
 
-func (c *lxc) processesState() int64 {
+func (d *lxc) processesState() int64 {
 	// Return 0 if not running
-	pid := c.InitPID()
+	pid := d.InitPID()
 	if pid == -1 {
 		return 0
 	}
 
-	cg, err := c.cgroup(nil)
+	cg, err := d.cgroup(nil)
 	if err != nil {
 		return 0
 	}
 
-	if c.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
+	if d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
 		value, err := cg.GetProcessesUsage()
 		if err != nil {
 			return -1
@@ -5916,23 +5916,23 @@ func (c *lxc) processesState() int64 {
 
 // getStoragePool returns the current storage pool handle. To avoid a DB lookup each time this
 // function is called, the handle is cached internally in the lxc struct.
-func (c *lxc) getStoragePool() (storagePools.Pool, error) {
-	if c.storagePool != nil {
-		return c.storagePool, nil
+func (d *lxc) getStoragePool() (storagePools.Pool, error) {
+	if d.storagePool != nil {
+		return d.storagePool, nil
 	}
 
-	pool, err := storagePools.GetPoolByInstance(c.state, c)
+	pool, err := storagePools.GetPoolByInstance(d.state, d)
 	if err != nil {
 		return nil, err
 	}
-	c.storagePool = pool
+	d.storagePool = pool
 
-	return c.storagePool, nil
+	return d.storagePool, nil
 }
 
 // getStorageType returns the storage type of the instance's storage pool.
-func (c *lxc) getStorageType() (string, error) {
-	pool, err := c.getStoragePool()
+func (d *lxc) getStorageType() (string, error) {
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return "", err
 	}
@@ -5941,14 +5941,14 @@ func (c *lxc) getStorageType() (string, error) {
 }
 
 // mount the instance's rootfs volume if needed.
-func (c *lxc) mount() (*storagePools.MountInfo, error) {
-	pool, err := c.getStoragePool()
+func (d *lxc) mount() (*storagePools.MountInfo, error) {
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return nil, err
 	}
 
-	if c.IsSnapshot() {
-		mountInfo, err := pool.MountInstanceSnapshot(c, nil)
+	if d.IsSnapshot() {
+		mountInfo, err := pool.MountInstanceSnapshot(d, nil)
 		if err != nil {
 			return nil, err
 		}
@@ -5956,7 +5956,7 @@ func (c *lxc) mount() (*storagePools.MountInfo, error) {
 		return mountInfo, nil
 	}
 
-	mountInfo, err := pool.MountInstance(c, nil)
+	mountInfo, err := pool.MountInstance(d, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -5965,14 +5965,14 @@ func (c *lxc) mount() (*storagePools.MountInfo, error) {
 }
 
 // unmount the instance's rootfs volume if needed.
-func (c *lxc) unmount() (bool, error) {
-	pool, err := c.getStoragePool()
+func (d *lxc) unmount() (bool, error) {
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return false, err
 	}
 
-	if c.IsSnapshot() {
-		unmounted, err := pool.UnmountInstanceSnapshot(c, nil)
+	if d.IsSnapshot() {
+		unmounted, err := pool.UnmountInstanceSnapshot(d, nil)
 		if err != nil {
 			return false, err
 		}
@@ -5980,7 +5980,7 @@ func (c *lxc) unmount() (bool, error) {
 		return unmounted, nil
 	}
 
-	unmounted, err := pool.UnmountInstance(c, nil)
+	unmounted, err := pool.UnmountInstance(d, nil)
 	if err != nil {
 		return false, err
 	}
@@ -5994,11 +5994,11 @@ func (c *lxc) unmount() (bool, error) {
 // we'll have a deadlock (with a timeout but still). The InitPID() call here is
 // the exception since the seccomp notifier will make sure to always pass a
 // valid PID.
-func (c *lxc) insertMountLXD(source, target, fstype string, flags int, mntnsPID int, shiftfs bool) error {
+func (d *lxc) insertMountLXD(source, target, fstype string, flags int, mntnsPID int, shiftfs bool) error {
 	pid := mntnsPID
 	if pid <= 0 {
 		// Get the init PID
-		pid = c.InitPID()
+		pid = d.InitPID()
 		if pid == -1 {
 			// Container isn't running
 			return fmt.Errorf("Can't insert mount into stopped container")
@@ -6009,12 +6009,12 @@ func (c *lxc) insertMountLXD(source, target, fstype string, flags int, mntnsPID
 	var tmpMount string
 	var err error
 	if shared.IsDir(source) {
-		tmpMount, err = ioutil.TempDir(c.ShmountsPath(), "lxdmount_")
+		tmpMount, err = ioutil.TempDir(d.ShmountsPath(), "lxdmount_")
 		if err != nil {
 			return fmt.Errorf("Failed to create shmounts path: %s", err)
 		}
 	} else {
-		f, err := ioutil.TempFile(c.ShmountsPath(), "lxdmount_")
+		f, err := ioutil.TempFile(d.ShmountsPath(), "lxdmount_")
 		if err != nil {
 			return fmt.Errorf("Failed to create shmounts path: %s", err)
 		}
@@ -6044,14 +6044,14 @@ func (c *lxc) insertMountLXD(source, target, fstype string, flags int, mntnsPID
 	mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount))
 	pidStr := fmt.Sprintf("%d", pid)
 
-	pidFdNr, pidFd := seccomp.MakePidFd(pid, c.state)
+	pidFdNr, pidFd := seccomp.MakePidFd(pid, d.state)
 	if pidFdNr >= 0 {
 		defer pidFd.Close()
 	}
 
 	_, err = shared.RunCommandInheritFds(
 		[]*os.File{pidFd},
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forkmount",
 		"lxd-mount",
 		"--",
@@ -6067,9 +6067,9 @@ func (c *lxc) insertMountLXD(source, target, fstype string, flags int, mntnsPID
 	return nil
 }
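
The deadlock note above is the reason insertMountLXD() accepts an explicit mntnsPID: when called from the seccomp notifier the target PID is already known and the function must not call back into liblxc. A minimal sketch of that "trust the supplied PID, only fall back when none was given" pattern (initPID is a hypothetical callback standing in for InitPID()):

package main

import "fmt"

// resolveTargetPID prefers a caller-supplied PID and only falls back to the
// potentially blocking init-PID lookup when no PID was provided.
func resolveTargetPID(suppliedPID int, initPID func() int) (int, error) {
	if suppliedPID > 0 {
		return suppliedPID, nil
	}

	pid := initPID()
	if pid == -1 {
		return -1, fmt.Errorf("Can't resolve the PID of a stopped container")
	}

	return pid, nil
}

func main() {
	// Simulate the seccomp notifier path (PID supplied) and the normal path.
	fmt.Println(resolveTargetPID(4242, func() int { return -1 }))
	fmt.Println(resolveTargetPID(-1, func() int { return 1234 }))
}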
 
-func (c *lxc) insertMountLXC(source, target, fstype string, flags int) error {
-	cname := project.Instance(c.Project(), c.Name())
-	configPath := filepath.Join(c.LogPath(), "lxc.conf")
+func (d *lxc) insertMountLXC(source, target, fstype string, flags int) error {
+	cname := project.Instance(d.Project(), d.Name())
+	configPath := filepath.Join(d.LogPath(), "lxc.conf")
 	if fstype == "" {
 		fstype = "none"
 	}
@@ -6079,12 +6079,12 @@ func (c *lxc) insertMountLXC(source, target, fstype string, flags int) error {
 	}
 
 	_, err := shared.RunCommand(
-		c.state.OS.ExecPath,
+		d.state.OS.ExecPath,
 		"forkmount",
 		"lxc-mount",
 		"--",
 		cname,
-		c.state.OS.LxcPath,
+		d.state.OS.LxcPath,
 		configPath,
 		source,
 		target,
@@ -6097,37 +6097,37 @@ func (c *lxc) insertMountLXC(source, target, fstype string, flags int) error {
 	return nil
 }
 
-func (c *lxc) insertMount(source, target, fstype string, flags int, shiftfs bool) error {
-	if c.state.OS.LXCFeatures["mount_injection_file"] && !shiftfs {
-		return c.insertMountLXC(source, target, fstype, flags)
+func (d *lxc) insertMount(source, target, fstype string, flags int, shiftfs bool) error {
+	if d.state.OS.LXCFeatures["mount_injection_file"] && !shiftfs {
+		return d.insertMountLXC(source, target, fstype, flags)
 	}
 
-	return c.insertMountLXD(source, target, fstype, flags, -1, shiftfs)
+	return d.insertMountLXD(source, target, fstype, flags, -1, shiftfs)
 }
 
-func (c *lxc) removeMount(mount string) error {
+func (d *lxc) removeMount(mount string) error {
 	// Get the init PID
-	pid := c.InitPID()
+	pid := d.InitPID()
 	if pid == -1 {
 		// Container isn't running
 		return fmt.Errorf("Can't remove mount from stopped container")
 	}
 
-	if c.state.OS.LXCFeatures["mount_injection_file"] {
-		configPath := filepath.Join(c.LogPath(), "lxc.conf")
-		cname := project.Instance(c.Project(), c.Name())
+	if d.state.OS.LXCFeatures["mount_injection_file"] {
+		configPath := filepath.Join(d.LogPath(), "lxc.conf")
+		cname := project.Instance(d.Project(), d.Name())
 
 		if !strings.HasPrefix(mount, "/") {
 			mount = "/" + mount
 		}
 
 		_, err := shared.RunCommand(
-			c.state.OS.ExecPath,
+			d.state.OS.ExecPath,
 			"forkmount",
 			"lxc-umount",
 			"--",
 			cname,
-			c.state.OS.LxcPath,
+			d.state.OS.LxcPath,
 			configPath,
 			mount)
 		if err != nil {
@@ -6135,14 +6135,14 @@ func (c *lxc) removeMount(mount string) error {
 		}
 	} else {
 		// Remove the mount from the container
-		pidFdNr, pidFd := c.inheritInitPidFd()
+		pidFdNr, pidFd := d.inheritInitPidFd()
 		if pidFdNr >= 0 {
 			defer pidFd.Close()
 		}
 
 		_, err := shared.RunCommandInheritFds(
 			[]*os.File{pidFd},
-			c.state.OS.ExecPath,
+			d.state.OS.ExecPath,
 			"forkmount",
 			"lxd-umount",
 			"--",
@@ -6158,7 +6158,7 @@ func (c *lxc) removeMount(mount string) error {
 }
 
 // InsertSeccompUnixDevice inserts a seccomp device.
-func (c *lxc) InsertSeccompUnixDevice(prefix string, m deviceConfig.Device, pid int) error {
+func (d *lxc) InsertSeccompUnixDevice(prefix string, m deviceConfig.Device, pid int) error {
 	if pid < 0 {
 		return fmt.Errorf("Invalid request PID specified")
 	}
@@ -6174,7 +6174,7 @@ func (c *lxc) InsertSeccompUnixDevice(prefix string, m deviceConfig.Device, pid
 		return err
 	}
 
-	idmapset, err := c.CurrentIdmap()
+	idmapset, err := d.CurrentIdmap()
 	if err != nil {
 		return err
 	}
@@ -6196,12 +6196,12 @@ func (c *lxc) InsertSeccompUnixDevice(prefix string, m deviceConfig.Device, pid
 		m["path"] = filepath.Join(rootPath, m["path"])
 	}
 
-	idmapSet, err := c.CurrentIdmap()
+	idmapSet, err := d.CurrentIdmap()
 	if err != nil {
 		return err
 	}
 
-	dev, err := device.UnixDeviceCreate(c.state, idmapSet, c.DevicesPath(), prefix, m, true)
+	dev, err := device.UnixDeviceCreate(d.state, idmapSet, d.DevicesPath(), prefix, m, true)
 	if err != nil {
 		return fmt.Errorf("Failed to setup device: %s", err)
 	}
@@ -6211,17 +6211,17 @@ func (c *lxc) InsertSeccompUnixDevice(prefix string, m deviceConfig.Device, pid
 
 	// Bind-mount it into the container
 	defer os.Remove(devPath)
-	return c.insertMountLXD(devPath, tgtPath, "none", unix.MS_BIND, pid, false)
+	return d.insertMountLXD(devPath, tgtPath, "none", unix.MS_BIND, pid, false)
 }
 
-func (c *lxc) removeUnixDevices() error {
+func (d *lxc) removeUnixDevices() error {
 	// Check that we indeed have devices to remove
-	if !shared.PathExists(c.DevicesPath()) {
+	if !shared.PathExists(d.DevicesPath()) {
 		return nil
 	}
 
 	// Load the directory listing
-	dents, err := ioutil.ReadDir(c.DevicesPath())
+	dents, err := ioutil.ReadDir(d.DevicesPath())
 	if err != nil {
 		return err
 	}
@@ -6234,7 +6234,7 @@ func (c *lxc) removeUnixDevices() error {
 		}
 
 		// Remove the entry
-		devicePath := filepath.Join(c.DevicesPath(), f.Name())
+		devicePath := filepath.Join(d.DevicesPath(), f.Name())
 		err := os.Remove(devicePath)
 		if err != nil {
 			logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
@@ -6246,7 +6246,7 @@ func (c *lxc) removeUnixDevices() error {
 
 // FillNetworkDevice takes a nic or infiniband device type and enriches it with automatically
 // generated name and hwaddr properties if these are missing from the device.
-func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
+func (d *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
 	var err error
 	newDevice := m.Clone()
 
@@ -6255,14 +6255,14 @@ func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfi
 		devNames := []string{}
 
 		// Include all static interface names
-		for _, dev := range c.expandedDevices.Sorted() {
+		for _, dev := range d.expandedDevices.Sorted() {
 			if dev.Config["name"] != "" && !shared.StringInSlice(dev.Config["name"], devNames) {
 				devNames = append(devNames, dev.Config["name"])
 			}
 		}
 
 		// Include all currently allocated interface names
-		for k, v := range c.expandedConfig {
+		for k, v := range d.expandedConfig {
 			if !strings.HasPrefix(k, "volatile.") {
 				continue
 			}
@@ -6280,8 +6280,8 @@ func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfi
 		}
 
 		// Attempt to include all existing interfaces
-		cname := project.Instance(c.Project(), c.Name())
-		cc, err := liblxc.NewContainer(cname, c.state.OS.LxcPath)
+		cname := project.Instance(d.Project(), d.Name())
+		cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath)
 		if err == nil {
 			defer cc.Release()
 
@@ -6316,12 +6316,12 @@ func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfi
 	}
 
 	updateKey := func(key string, value string) error {
-		tx, err := c.state.Cluster.Begin()
+		tx, err := d.state.Cluster.Begin()
 		if err != nil {
 			return err
 		}
 
-		err = db.CreateInstanceConfig(tx, c.id, map[string]string{key: value})
+		err = db.CreateInstanceConfig(tx, d.id, map[string]string{key: value})
 		if err != nil {
 			tx.Rollback()
 			return err
@@ -6335,7 +6335,7 @@ func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfi
 		return nil
 	}
 
-	nicType, err := nictype.NICType(c.state, c.Project(), m)
+	nicType, err := nictype.NICType(d.state, d.Project(), m)
 	if err != nil {
 		return nil, err
 	}
@@ -6343,7 +6343,7 @@ func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfi
 	// Fill in the MAC address
 	if !shared.StringInSlice(nicType, []string{"physical", "ipvlan", "sriov"}) && m["hwaddr"] == "" {
 		configKey := fmt.Sprintf("volatile.%s.hwaddr", name)
-		volatileHwaddr := c.localConfig[configKey]
+		volatileHwaddr := d.localConfig[configKey]
 		if volatileHwaddr == "" {
 			// Generate a new MAC address
 			volatileHwaddr, err = instance.DeviceNextInterfaceHWAddr()
@@ -6356,18 +6356,18 @@ func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfi
 				err := updateKey(configKey, volatileHwaddr)
 				if err != nil {
 					// Check if something else filled it in behind our back
-					value, err1 := c.state.Cluster.GetInstanceConfig(c.id, configKey)
+					value, err1 := d.state.Cluster.GetInstanceConfig(d.id, configKey)
 					if err1 != nil || value == "" {
 						return err
 					}
 
-					c.localConfig[configKey] = value
-					c.expandedConfig[configKey] = value
+					d.localConfig[configKey] = value
+					d.expandedConfig[configKey] = value
 					return nil
 				}
 
-				c.localConfig[configKey] = volatileHwaddr
-				c.expandedConfig[configKey] = volatileHwaddr
+				d.localConfig[configKey] = volatileHwaddr
+				d.expandedConfig[configKey] = volatileHwaddr
 				return nil
 			})
 			if err != nil {
@@ -6380,7 +6380,7 @@ func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfi
 	// Fill in the name
 	if m["name"] == "" {
 		configKey := fmt.Sprintf("volatile.%s.name", name)
-		volatileName := c.localConfig[configKey]
+		volatileName := d.localConfig[configKey]
 		if volatileName == "" {
 			// Generate a new interface name
 			volatileName, err := nextInterfaceName()
@@ -6392,16 +6392,16 @@ func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfi
 			err = updateKey(configKey, volatileName)
 			if err != nil {
 				// Check if something else filled it in behind our back
-				value, err1 := c.state.Cluster.GetInstanceConfig(c.id, configKey)
+				value, err1 := d.state.Cluster.GetInstanceConfig(d.id, configKey)
 				if err1 != nil || value == "" {
 					return nil, err
 				}
 
-				c.localConfig[configKey] = value
-				c.expandedConfig[configKey] = value
+				d.localConfig[configKey] = value
+				d.expandedConfig[configKey] = value
 			} else {
-				c.localConfig[configKey] = volatileName
-				c.expandedConfig[configKey] = volatileName
+				d.localConfig[configKey] = volatileName
+				d.expandedConfig[configKey] = volatileName
 			}
 		}
 		newDevice["name"] = volatileName
@@ -6410,14 +6410,14 @@ func (c *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfi
 	return newDevice, nil
 }
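
FillNetworkDevice() fills volatile.<name>.hwaddr through instance.DeviceNextInterfaceHWAddr(). As a hedged sketch of what such a generator typically looks like (the 00:16:3e prefix is an assumption for the example, matching the prefix LXD is known to use for generated addresses):

package main

import (
	"crypto/rand"
	"fmt"
)

// nextInterfaceHWAddr sketches a volatile MAC generator: a fixed vendor
// prefix followed by three random octets.
func nextInterfaceHWAddr() (string, error) {
	buf := make([]byte, 3)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}

	return fmt.Sprintf("00:16:3e:%02x:%02x:%02x", buf[0], buf[1], buf[2]), nil
}

func main() {
	hwaddr, err := nextInterfaceHWAddr()
	fmt.Println(hwaddr, err)
}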
 
-func (c *lxc) removeDiskDevices() error {
+func (d *lxc) removeDiskDevices() error {
 	// Check that we indeed have devices to remove
-	if !shared.PathExists(c.DevicesPath()) {
+	if !shared.PathExists(d.DevicesPath()) {
 		return nil
 	}
 
 	// Load the directory listing
-	dents, err := ioutil.ReadDir(c.DevicesPath())
+	dents, err := ioutil.ReadDir(d.DevicesPath())
 	if err != nil {
 		return err
 	}
@@ -6430,10 +6430,10 @@ func (c *lxc) removeDiskDevices() error {
 		}
 
 		// Always try to unmount the host side
-		_ = unix.Unmount(filepath.Join(c.DevicesPath(), f.Name()), unix.MNT_DETACH)
+		_ = unix.Unmount(filepath.Join(d.DevicesPath(), f.Name()), unix.MNT_DETACH)
 
 		// Remove the entry
-		diskPath := filepath.Join(c.DevicesPath(), f.Name())
+		diskPath := filepath.Join(d.DevicesPath(), f.Name())
 		err := os.Remove(diskPath)
 		if err != nil {
 			logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
@@ -6444,31 +6444,31 @@ func (c *lxc) removeDiskDevices() error {
 }
 
 // Network I/O limits
-func (c *lxc) setNetworkPriority() error {
+func (d *lxc) setNetworkPriority() error {
 	// Load the go-lxc struct.
-	err := c.initLXC(false)
+	err := d.initLXC(false)
 	if err != nil {
 		return err
 	}
 
 	// Load the cgroup struct.
-	cg, err := c.cgroup(nil)
+	cg, err := d.cgroup(nil)
 	if err != nil {
 		return err
 	}
 
 	// Check that the container is running
-	if !c.IsRunning() {
+	if !d.IsRunning() {
 		return fmt.Errorf("Can't set network priority on stopped container")
 	}
 
 	// Don't bother if the cgroup controller doesn't exist
-	if !c.state.OS.CGInfo.Supports(cgroup.NetPrio, cg) {
+	if !d.state.OS.CGInfo.Supports(cgroup.NetPrio, cg) {
 		return nil
 	}
 
 	// Extract the current priority
-	networkPriority := c.expandedConfig["limits.network.priority"]
+	networkPriority := d.expandedConfig["limits.network.priority"]
 	if networkPriority == "" {
 		networkPriority = "0"
 	}
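
setNetworkPriority() reads limits.network.priority from the expanded config and treats an unset value as "0". A minimal sketch of the parsing step that follows in the rest of the function (assuming only that the value must be an integer):

package main

import (
	"fmt"
	"strconv"
)

// parseNetworkPriority mirrors the defaulting shown above: empty means 0,
// anything else has to parse as an integer.
func parseNetworkPriority(value string) (int, error) {
	if value == "" {
		value = "0"
	}

	prio, err := strconv.Atoi(value)
	if err != nil {
		return 0, fmt.Errorf("Invalid network priority %q: %v", value, err)
	}

	return prio, nil
}

func main() {
	fmt.Println(parseNetworkPriority(""))
	fmt.Println(parseNetworkPriority("5"))
}
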
@@ -6504,105 +6504,105 @@ func (c *lxc) setNetworkPriority() error {
 }
 
 // IsStateful returns if instance is stateful.
-func (c *lxc) IsStateful() bool {
-	return c.stateful
+func (d *lxc) IsStateful() bool {
+	return d.stateful
 }
 
 // IsEphemeral returns if instance is ephemeral.
-func (c *lxc) IsEphemeral() bool {
-	return c.ephemeral
+func (d *lxc) IsEphemeral() bool {
+	return d.ephemeral
 }
 
 // IsFrozen returns if instance is frozen.
-func (c *lxc) IsFrozen() bool {
-	return c.State() == "FROZEN"
+func (d *lxc) IsFrozen() bool {
+	return d.State() == "FROZEN"
 }
 
 // IsNesting returns if instance is nested.
-func (c *lxc) IsNesting() bool {
-	return shared.IsTrue(c.expandedConfig["security.nesting"])
+func (d *lxc) IsNesting() bool {
+	return shared.IsTrue(d.expandedConfig["security.nesting"])
 }
 
-func (c *lxc) isCurrentlyPrivileged() bool {
-	if !c.IsRunning() {
-		return c.IsPrivileged()
+func (d *lxc) isCurrentlyPrivileged() bool {
+	if !d.IsRunning() {
+		return d.IsPrivileged()
 	}
 
-	idmap, err := c.CurrentIdmap()
+	idmap, err := d.CurrentIdmap()
 	if err != nil {
-		return c.IsPrivileged()
+		return d.IsPrivileged()
 	}
 
 	return idmap == nil
 }
 
 // IsPrivileged returns if instance is privileged.
-func (c *lxc) IsPrivileged() bool {
-	return shared.IsTrue(c.expandedConfig["security.privileged"])
+func (d *lxc) IsPrivileged() bool {
+	return shared.IsTrue(d.expandedConfig["security.privileged"])
 }
 
 // IsRunning returns if instance is running.
-func (c *lxc) IsRunning() bool {
-	state := c.State()
+func (d *lxc) IsRunning() bool {
+	state := d.State()
 	return state != "BROKEN" && state != "STOPPED"
 }
 
 // IsSnapshot returns if instance is a snapshot.
-func (c *lxc) IsSnapshot() bool {
-	return c.snapshot
+func (d *lxc) IsSnapshot() bool {
+	return d.snapshot
 }
 
 // CreationDate returns creation date of instance.
-func (c *lxc) CreationDate() time.Time {
-	return c.creationDate
+func (d *lxc) CreationDate() time.Time {
+	return d.creationDate
 }
 
 // LastUsedDate returns last used date time of instance.
-func (c *lxc) LastUsedDate() time.Time {
-	return c.lastUsedDate
+func (d *lxc) LastUsedDate() time.Time {
+	return d.lastUsedDate
 }
 
 // ExpandedConfig returns expanded config.
-func (c *lxc) ExpandedConfig() map[string]string {
-	return c.expandedConfig
+func (d *lxc) ExpandedConfig() map[string]string {
+	return d.expandedConfig
 }
 
 // ExpandedDevices returns expanded devices config.
-func (c *lxc) ExpandedDevices() deviceConfig.Devices {
-	return c.expandedDevices
+func (d *lxc) ExpandedDevices() deviceConfig.Devices {
+	return d.expandedDevices
 }
 
 // ID gets the instance's ID.
-func (c *lxc) ID() int {
-	return c.id
+func (d *lxc) ID() int {
+	return d.id
 }
 
 // InitPID returns PID of init process.
-func (c *lxc) InitPID() int {
+func (d *lxc) InitPID() int {
 	// Load the go-lxc struct
-	err := c.initLXC(false)
+	err := d.initLXC(false)
 	if err != nil {
 		return -1
 	}
 
-	return c.c.InitPid()
+	return d.c.InitPid()
 }
 
 // InitPidFd returns pidfd of init process.
-func (c *lxc) InitPidFd() (*os.File, error) {
+func (d *lxc) InitPidFd() (*os.File, error) {
 	// Load the go-lxc struct
-	err := c.initLXC(false)
+	err := d.initLXC(false)
 	if err != nil {
 		return nil, err
 	}
 
-	return c.c.InitPidFd()
+	return d.c.InitPidFd()
 }
 
 // DevptsFd returns dirfd of devpts mount.
-func (c *lxc) DevptsFd() (*os.File, error) {
+func (d *lxc) DevptsFd() (*os.File, error) {
 	// Load the go-lxc struct
-	err := c.initLXC(false)
+	err := d.initLXC(false)
 	if err != nil {
 		return nil, err
 	}
@@ -6611,27 +6611,27 @@ func (c *lxc) DevptsFd() (*os.File, error) {
 		return nil, fmt.Errorf("Missing devpts_fd extension")
 	}
 
-	return c.c.DevptsFd()
+	return d.c.DevptsFd()
 }
 
 // LocalConfig returns local config.
-func (c *lxc) LocalConfig() map[string]string {
-	return c.localConfig
+func (d *lxc) LocalConfig() map[string]string {
+	return d.localConfig
 }
 
 // CurrentIdmap returns current IDMAP.
-func (c *lxc) CurrentIdmap() (*idmap.IdmapSet, error) {
-	jsonIdmap, ok := c.LocalConfig()["volatile.idmap.current"]
+func (d *lxc) CurrentIdmap() (*idmap.IdmapSet, error) {
+	jsonIdmap, ok := d.LocalConfig()["volatile.idmap.current"]
 	if !ok {
-		return c.DiskIdmap()
+		return d.DiskIdmap()
 	}
 
 	return idmap.JSONUnmarshal(jsonIdmap)
 }
 
 // DiskIdmap returns DISK IDMAP.
-func (c *lxc) DiskIdmap() (*idmap.IdmapSet, error) {
-	jsonIdmap, ok := c.LocalConfig()["volatile.last_state.idmap"]
+func (d *lxc) DiskIdmap() (*idmap.IdmapSet, error) {
+	jsonIdmap, ok := d.LocalConfig()["volatile.last_state.idmap"]
 	if !ok {
 		return nil, nil
 	}
@@ -6640,38 +6640,38 @@ func (c *lxc) DiskIdmap() (*idmap.IdmapSet, error) {
 }
 
 // NextIdmap returns next IDMAP.
-func (c *lxc) NextIdmap() (*idmap.IdmapSet, error) {
-	jsonIdmap, ok := c.LocalConfig()["volatile.idmap.next"]
+func (d *lxc) NextIdmap() (*idmap.IdmapSet, error) {
+	jsonIdmap, ok := d.LocalConfig()["volatile.idmap.next"]
 	if !ok {
-		return c.CurrentIdmap()
+		return d.CurrentIdmap()
 	}
 
 	return idmap.JSONUnmarshal(jsonIdmap)
 }
 
 // Location returns instance location.
-func (c *lxc) Location() string {
-	return c.node
+func (d *lxc) Location() string {
+	return d.node
 }
 
 // Name returns instance name.
-func (c *lxc) Name() string {
-	return c.name
+func (d *lxc) Name() string {
+	return d.name
 }
 
 // Description returns instance description.
-func (c *lxc) Description() string {
-	return c.description
+func (d *lxc) Description() string {
+	return d.description
 }
 
 // Profiles returns instance profiles.
-func (c *lxc) Profiles() []string {
-	return c.profiles
+func (d *lxc) Profiles() []string {
+	return d.profiles
 }
 
 // State returns instance state.
-func (c *lxc) State() string {
-	state, err := c.getLxcState()
+func (d *lxc) State() string {
+	state, err := d.getLxcState()
 	if err != nil {
 		return api.Error.String()
 	}
@@ -6679,64 +6679,64 @@ func (c *lxc) State() string {
 }
 
 // Path instance path.
-func (c *lxc) Path() string {
-	return storagePools.InstancePath(c.Type(), c.Project(), c.Name(), c.IsSnapshot())
+func (d *lxc) Path() string {
+	return storagePools.InstancePath(d.Type(), d.Project(), d.Name(), d.IsSnapshot())
 }
 
 // DevicesPath devices path.
-func (c *lxc) DevicesPath() string {
-	name := project.Instance(c.Project(), c.Name())
+func (d *lxc) DevicesPath() string {
+	name := project.Instance(d.Project(), d.Name())
 	return shared.VarPath("devices", name)
 }
 
 // ShmountsPath shared mounts path.
-func (c *lxc) ShmountsPath() string {
-	name := project.Instance(c.Project(), c.Name())
+func (d *lxc) ShmountsPath() string {
+	name := project.Instance(d.Project(), d.Name())
 	return shared.VarPath("shmounts", name)
 }
 
 // LogPath log path.
-func (c *lxc) LogPath() string {
-	name := project.Instance(c.Project(), c.Name())
+func (d *lxc) LogPath() string {
+	name := project.Instance(d.Project(), d.Name())
 	return shared.LogPath(name)
 }
 
 // LogFilePath log file path.
-func (c *lxc) LogFilePath() string {
-	return filepath.Join(c.LogPath(), "lxc.log")
+func (d *lxc) LogFilePath() string {
+	return filepath.Join(d.LogPath(), "lxc.log")
 }
 
 // ConsoleBufferLogPath console buffer log path.
-func (c *lxc) ConsoleBufferLogPath() string {
-	return filepath.Join(c.LogPath(), "console.log")
+func (d *lxc) ConsoleBufferLogPath() string {
+	return filepath.Join(d.LogPath(), "console.log")
 }
 
 // RootfsPath root filesystem path.
-func (c *lxc) RootfsPath() string {
-	return filepath.Join(c.Path(), "rootfs")
+func (d *lxc) RootfsPath() string {
+	return filepath.Join(d.Path(), "rootfs")
 }
 
 // TemplatesPath templates path.
-func (c *lxc) TemplatesPath() string {
-	return filepath.Join(c.Path(), "templates")
+func (d *lxc) TemplatesPath() string {
+	return filepath.Join(d.Path(), "templates")
 }
 
 // StatePath state path.
-func (c *lxc) StatePath() string {
+func (d *lxc) StatePath() string {
 	/* FIXME: backwards compatibility: we used to use Join(RootfsPath(),
 	 * "state"), which was bad. Let's just check to see if that directory
 	 * exists.
 	 */
-	oldStatePath := filepath.Join(c.RootfsPath(), "state")
+	oldStatePath := filepath.Join(d.RootfsPath(), "state")
 	if shared.IsDir(oldStatePath) {
 		return oldStatePath
 	}
-	return filepath.Join(c.Path(), "state")
+	return filepath.Join(d.Path(), "state")
 }
 
 // StoragePool storage pool name.
-func (c *lxc) StoragePool() (string, error) {
-	poolName, err := c.state.Cluster.GetInstancePool(c.Project(), c.Name())
+func (d *lxc) StoragePool() (string, error) {
+	poolName, err := d.state.Cluster.GetInstancePool(d.Project(), d.Name())
 	if err != nil {
 		return "", err
 	}
@@ -6745,38 +6745,38 @@ func (c *lxc) StoragePool() (string, error) {
 }
 
 // SetOperation sets the operation used for progress tracking.
-func (c *lxc) SetOperation(op *operations.Operation) {
-	c.op = op
+func (d *lxc) SetOperation(op *operations.Operation) {
+	d.op = op
 }
 
 // ExpiryDate returns the expiry date of the snapshot.
-func (c *lxc) ExpiryDate() time.Time {
-	if c.IsSnapshot() {
-		return c.expiryDate
+func (d *lxc) ExpiryDate() time.Time {
+	if d.IsSnapshot() {
+		return d.expiryDate
 	}
 
 	// Return zero time if the container is not a snapshot
 	return time.Time{}
 }
 
-func (c *lxc) updateProgress(progress string) {
-	if c.op == nil {
+func (d *lxc) updateProgress(progress string) {
+	if d.op == nil {
 		return
 	}
 
-	meta := c.op.Metadata()
+	meta := d.op.Metadata()
 	if meta == nil {
 		meta = make(map[string]interface{})
 	}
 
 	if meta["container_progress"] != progress {
 		meta["container_progress"] = progress
-		c.op.UpdateMetadata(meta)
+		d.op.UpdateMetadata(meta)
 	}
 }
 
 // Internal MAAS handling.
-func (c *lxc) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
+func (d *lxc) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
 	interfaces := []maas.ContainerInterface{}
 	for k, m := range devices {
 		if m["type"] != "nic" {
@@ -6787,7 +6787,7 @@ func (c *lxc) maasInterfaces(devices map[string]map[string]string) ([]maas.Conta
 			continue
 		}
 
-		m, err := c.FillNetworkDevice(k, m)
+		m, err := d.FillNetworkDevice(k, m)
 		if err != nil {
 			return nil, err
 		}
@@ -6826,9 +6826,9 @@ func (c *lxc) maasInterfaces(devices map[string]map[string]string) ([]maas.Conta
 	return interfaces, nil
 }
 
-func (c *lxc) maasUpdate(oldDevices map[string]map[string]string) error {
+func (d *lxc) maasUpdate(oldDevices map[string]map[string]string) error {
 	// Check if MAAS is configured
-	maasURL, err := cluster.ConfigGetString(c.state.Cluster, "maas.api.url")
+	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
 	if err != nil {
 		return err
 	}
@@ -6838,14 +6838,14 @@ func (c *lxc) maasUpdate(oldDevices map[string]map[string]string) error {
 	}
 
 	// Check if there's something that uses MAAS
-	interfaces, err := c.maasInterfaces(c.expandedDevices.CloneNative())
+	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
 	if err != nil {
 		return err
 	}
 
 	var oldInterfaces []maas.ContainerInterface
 	if oldDevices != nil {
-		oldInterfaces, err = c.maasInterfaces(oldDevices)
+		oldInterfaces, err = d.maasInterfaces(oldDevices)
 		if err != nil {
 			return err
 		}
@@ -6856,28 +6856,28 @@ func (c *lxc) maasUpdate(oldDevices map[string]map[string]string) error {
 	}
 
 	// See if we're connected to MAAS
-	if c.state.MAAS == nil {
+	if d.state.MAAS == nil {
 		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
 	}
 
-	exists, err := c.state.MAAS.DefinedContainer(c)
+	exists, err := d.state.MAAS.DefinedContainer(d)
 	if err != nil {
 		return err
 	}
 
 	if exists {
 		if len(interfaces) == 0 && len(oldInterfaces) > 0 {
-			return c.state.MAAS.DeleteContainer(c)
+			return d.state.MAAS.DeleteContainer(d)
 		}
 
-		return c.state.MAAS.UpdateContainer(c, interfaces)
+		return d.state.MAAS.UpdateContainer(d, interfaces)
 	}
 
-	return c.state.MAAS.CreateContainer(c, interfaces)
+	return d.state.MAAS.CreateContainer(d, interfaces)
 }
 
-func (c *lxc) maasRename(newName string) error {
-	maasURL, err := cluster.ConfigGetString(c.state.Cluster, "maas.api.url")
+func (d *lxc) maasRename(newName string) error {
+	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
 	if err != nil {
 		return err
 	}
@@ -6886,7 +6886,7 @@ func (c *lxc) maasRename(newName string) error {
 		return nil
 	}
 
-	interfaces, err := c.maasInterfaces(c.expandedDevices.CloneNative())
+	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
 	if err != nil {
 		return err
 	}
@@ -6895,24 +6895,24 @@ func (c *lxc) maasRename(newName string) error {
 		return nil
 	}
 
-	if c.state.MAAS == nil {
+	if d.state.MAAS == nil {
 		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
 	}
 
-	exists, err := c.state.MAAS.DefinedContainer(c)
+	exists, err := d.state.MAAS.DefinedContainer(d)
 	if err != nil {
 		return err
 	}
 
 	if !exists {
-		return c.maasUpdate(nil)
+		return d.maasUpdate(nil)
 	}
 
-	return c.state.MAAS.RenameContainer(c, newName)
+	return d.state.MAAS.RenameContainer(d, newName)
 }
 
-func (c *lxc) maasDelete() error {
-	maasURL, err := cluster.ConfigGetString(c.state.Cluster, "maas.api.url")
+func (d *lxc) maasDelete() error {
+	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
 	if err != nil {
 		return err
 	}
@@ -6921,7 +6921,7 @@ func (c *lxc) maasDelete() error {
 		return nil
 	}
 
-	interfaces, err := c.maasInterfaces(c.expandedDevices.CloneNative())
+	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
 	if err != nil {
 		return err
 	}
@@ -6930,11 +6930,11 @@ func (c *lxc) maasDelete() error {
 		return nil
 	}
 
-	if c.state.MAAS == nil {
+	if d.state.MAAS == nil {
 		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
 	}
 
-	exists, err := c.state.MAAS.DefinedContainer(c)
+	exists, err := d.state.MAAS.DefinedContainer(d)
 	if err != nil {
 		return err
 	}
@@ -6943,26 +6943,26 @@ func (c *lxc) maasDelete() error {
 		return nil
 	}
 
-	return c.state.MAAS.DeleteContainer(c)
+	return d.state.MAAS.DeleteContainer(d)
 }
 
-func (c *lxc) CGroup() (*cgroup.CGroup, error) {
+func (d *lxc) CGroup() (*cgroup.CGroup, error) {
 	// Load the go-lxc struct
-	err := c.initLXC(false)
+	err := d.initLXC(false)
 	if err != nil {
 		return nil, err
 	}
 
-	return c.cgroup(nil)
+	return d.cgroup(nil)
 }
 
-func (c *lxc) cgroup(cc *liblxc.Container) (*cgroup.CGroup, error) {
+func (d *lxc) cgroup(cc *liblxc.Container) (*cgroup.CGroup, error) {
 	rw := lxcCgroupReadWriter{}
 	if cc != nil {
 		rw.cc = cc
 		rw.conf = true
 	} else {
-		rw.cc = c.c
+		rw.cc = d.c
 	}
 
 	cg, err := cgroup.New(&rw)
@@ -7006,25 +7006,25 @@ func (rw *lxcCgroupReadWriter) Set(version cgroup.Backend, controller string, ke
 }
 
 // UpdateBackupFile writes the instance's backup.yaml file to storage.
-func (c *lxc) UpdateBackupFile() error {
-	pool, err := c.getStoragePool()
+func (d *lxc) UpdateBackupFile() error {
+	pool, err := d.getStoragePool()
 	if err != nil {
 		return err
 	}
 
-	return pool.UpdateInstanceBackupFile(c, nil)
+	return pool.UpdateInstanceBackupFile(d, nil)
 }
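
UpdateBackupFile() delegates to the storage pool, which rewrites the instance's backup.yaml. As a loose illustration of serialising a small config snapshot to YAML with gopkg.in/yaml.v2 (the backupInfo fields below are made up for the example and are not LXD's actual backup.yaml schema):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// backupInfo is an illustrative stand-in, not LXD's real backup.yaml layout.
type backupInfo struct {
	Name   string            `yaml:"name"`
	Pool   string            `yaml:"pool"`
	Config map[string]string `yaml:"config"`
}

func main() {
	data, err := yaml.Marshal(backupInfo{
		Name:   "c1",
		Pool:   "default",
		Config: map[string]string{"security.nesting": "true"},
	})
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}

	fmt.Print(string(data))
}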
 
 // SaveConfigFile generates the LXC config file on disk.
-func (c *lxc) SaveConfigFile() error {
-	err := c.initLXC(true)
+func (d *lxc) SaveConfigFile() error {
+	err := d.initLXC(true)
 	if err != nil {
 		return errors.Wrapf(err, "Failed to generate LXC config")
 	}
 
 	// Generate the LXC config.
-	configPath := filepath.Join(c.LogPath(), "lxc.conf")
-	err = c.c.SaveConfigFile(configPath)
+	configPath := filepath.Join(d.LogPath(), "lxc.conf")
+	err = d.c.SaveConfigFile(configPath)
 	if err != nil {
 		os.Remove(configPath)
 		return errors.Wrapf(err, "Failed to save LXC config to file %q", configPath)

