[lxc-devel] [lxd/master] lxd/instance: Adds per-struct contextual logger.

kevtheappdev on GitHub, via lxc-bot at linuxcontainers.org
Sat Nov 28 23:44:48 UTC 2020


From 78853e03434fcb0cb57abea641c88a5c67793d98 Mon Sep 17 00:00:00 2001
From: Kevin Turner <kevinturner at utexas.edu>
Date: Sat, 28 Nov 2020 17:38:51 -0600
Subject: [PATCH] lxd/instance: Adds per-struct contextual logger.

---
 lxd/instance/drivers/driver_common.go |   2 +
 lxd/instance/drivers/driver_lxc.go    | 214 +++++++++++++-------------
 lxd/instance/drivers/driver_qemu.go   | 124 +++++++--------
 3 files changed, 173 insertions(+), 167 deletions(-)

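What the change does: instead of calling the package-level logger
(logger.Info, logger.Errorf, ...) and repeating the instance fields at
every call site, each driver's embedded "common" struct now carries a
logger.Logger with the instance context attached once at construction
time. A minimal sketch of the pattern, using the in-tree shared/logger
and shared/logging packages (the log15 import alias and the newCommon
helper are illustrative, not part of the patch):

    import (
        log "gopkg.in/inconshreveable/log15.v2"

        "github.com/lxc/lxd/shared/logger"
        "github.com/lxc/lxd/shared/logging"
    )

    // common carries the contextual logger shared by all instance drivers.
    type common struct {
        name   string
        logger logger.Logger
    }

    // newCommon attaches the instance fields once; later calls such as
    // c.logger.Info("Creating container") inherit them automatically.
    func newCommon(instType string, name string) common {
        return common{
            name:   name,
            logger: logging.AddContext(logger.Log, log.Ctx{"instanceType": instType, "instanceName": name}),
        }
    }
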
diff --git a/lxd/instance/drivers/driver_common.go b/lxd/instance/drivers/driver_common.go
index ecb5d9bf55..d849b6b6c8 100644
--- a/lxd/instance/drivers/driver_common.go
+++ b/lxd/instance/drivers/driver_common.go
@@ -21,6 +21,7 @@ import (
 	storagePools "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/logger"
 )
 
 // common provides structure common to all instance types.
@@ -41,6 +42,7 @@ type common struct {
 	lastUsedDate    time.Time
 	localConfig     map[string]string
 	localDevices    deviceConfig.Devices
+	logger          logger.Logger
 	name            string
 	node            string
 	profiles        []string
diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index 35b353e588..6f0e93e5d3 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -155,6 +155,7 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 			lastUsedDate: args.LastUsedDate,
 			localConfig:  args.Config,
 			localDevices: args.Devices,
+			logger:       logging.AddContext(logger.Log, log.Ctx{"instanceType": args.Type.String(), "instanceName": args.Name}),
 			name:         args.Name,
 			node:         args.Node,
 			profiles:     args.Profiles,
@@ -190,7 +191,7 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 		"ephemeral": d.ephemeral,
 	}
 
-	logger.Info("Creating container", ctxMap)
+	d.common.logger.Info("Creating container", ctxMap)
 
 	// Load the config.
 	err := d.init()
@@ -327,7 +328,7 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 		}
 	}
 
-	logger.Info("Created container", ctxMap)
+	d.common.logger.Info("Created container", ctxMap)
 	d.state.Events.SendLifecycle(d.project, "container-created", fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
 	revert.Success()
@@ -380,6 +381,7 @@ func lxcInstantiate(s *state.State, args db.InstanceArgs, expandedDevices device
 			lastUsedDate: args.LastUsedDate,
 			localConfig:  args.Config,
 			localDevices: args.Devices,
+			logger:       logging.AddContext(logger.Log, log.Ctx{"instanceType": args.Type.String(), "instanceName": args.Name}),
 			name:         args.Name,
 			node:         args.Node,
 			profiles:     args.Profiles,
@@ -1289,14 +1291,14 @@ func (d *lxc) RegisterDevices() {
 		}
 
 		if err != nil {
-			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
+			d.common.logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
 			continue
 		}
 
 		// Check whether device wants to register for any events.
 		err = dev.Register()
 		if err != nil {
-			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
+			d.common.logger.Error("Failed to register device", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
 			continue
 		}
 	}
@@ -1340,7 +1342,7 @@ func (d *lxc) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instan
 
 // deviceStart loads a new device and calls its Start() function.
 func (d *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) (*deviceConfig.RunConfig, error) {
-	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
+	logger := logging.AddContext(d.common.logger, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
 	logger.Debug("Starting device")
 
 	dev, configCopy, err := d.deviceLoad(deviceName, rawConfig)
@@ -1505,7 +1507,7 @@ func (d *lxc) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, old
 // Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
 // container's network namespace is unmounted (which is required for NIC device cleanup).
 func (d *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool, stopHookNetnsPath string) error {
-	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
+	logger := logging.AddContext(d.common.logger, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
 	logger.Debug("Stopping device")
 
 	dev, configCopy, err := d.deviceLoad(deviceName, rawConfig)
@@ -1631,7 +1633,7 @@ func (d *lxc) deviceDetachNIC(configCopy map[string]string, netIF []deviceConfig
 			if err != nil {
 				return errors.Wrapf(err, "Failed to detach interface: %q to %q", configCopy["name"], devName)
 			}
-			logger.Debugf("Detached NIC device interface: %q to %q", configCopy["name"], devName)
+			d.common.logger.Debug(fmt.Sprintf("Detached NIC device interface: %q to %q", configCopy["name"], devName))
 		}
 	}
 
@@ -1677,7 +1679,7 @@ func (d *lxc) deviceHandleMounts(mounts []deviceConfig.MountEntryItem) error {
 					// Only warn here and don't fail as removing a directory
 					// mount may fail if there were already files inside the
 					// directory before it was mounted over, preventing delete.
-					logger.Warnf("Could not remove the device path inside container: %s", err)
+					d.common.logger.Warn(fmt.Sprintf("Could not remove the device path inside container: %s", err))
 				}
 			}
 		}
@@ -1688,7 +1690,7 @@ func (d *lxc) deviceHandleMounts(mounts []deviceConfig.MountEntryItem) error {
 
 // deviceRemove loads a new device and calls its Remove() function.
 func (d *lxc) deviceRemove(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
-	logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
+	logger := logging.AddContext(d.common.logger, log.Ctx{"device": deviceName, "type": rawConfig["type"], "project": d.Project(), "instance": d.Name()})
 
 	dev, _, err := d.deviceLoad(deviceName, rawConfig)
 
@@ -1895,7 +1897,7 @@ func (d *lxc) startCommon() (string, []func() error, error) {
 			return "", nil, fmt.Errorf("Container is protected against filesystem shifting")
 		}
 
-		logger.Debugf("Container idmap changed, remapping")
+		d.common.logger.Debug("Container idmap changed, remapping")
 		d.updateProgress("Remapping container filesystem")
 
 		storageType, err := d.getStorageType()
@@ -2013,7 +2015,7 @@ func (d *lxc) startCommon() (string, []func() error, error) {
 		revert.Add(func() {
 			err := d.deviceStop(dev.Name, dev.Config, false, "")
 			if err != nil {
-				logger.Errorf("Failed to cleanup device %q: %v", dev.Name, err)
+				d.common.logger.Error(fmt.Sprintf("Failed to cleanup device %q: %v", dev.Name, err))
 			}
 		})
 		if runConf == nil {
@@ -2253,7 +2255,7 @@ func (d *lxc) Start(stateful bool) error {
 		"stateful":  stateful}
 
 	if op.Action() == "start" {
-		logger.Info("Starting container", ctxMap)
+		d.common.logger.Info("Starting container", ctxMap)
 	}
 
 	// If stateful, restore now
@@ -2300,7 +2302,7 @@ func (d *lxc) Start(stateful bool) error {
 		}
 
 		if op.Action() == "start" {
-			logger.Info("Started container", ctxMap)
+			d.common.logger.Info("Started container", ctxMap)
 			d.state.Events.SendLifecycle(d.project, "container-started",
 				fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 		}
@@ -2358,7 +2360,7 @@ func (d *lxc) Start(stateful bool) error {
 			}
 		}
 
-		logger.Error("Failed starting container", ctxMap)
+		d.common.logger.Error("Failed starting container", ctxMap)
 
 		// Return the actual error
 		op.Done(err)
@@ -2376,7 +2378,7 @@ func (d *lxc) Start(stateful bool) error {
 	}
 
 	if op.Action() == "start" {
-		logger.Info("Started container", ctxMap)
+		d.common.logger.Info("Started container", ctxMap)
 		d.state.Events.SendLifecycle(d.project, "container-started",
 			fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 	}
@@ -2442,7 +2444,7 @@ func (d *lxc) onStart(_ map[string]string) error {
 			d.fromHook = false
 			err := d.setNetworkPriority()
 			if err != nil {
-				logger.Error("Failed to apply network priority", log.Ctx{"container": d.name, "err": err})
+				d.common.logger.Error("Failed to apply network priority", log.Ctx{"container": d.name, "err": err})
 			}
 		}(d)
 	}
@@ -2501,7 +2503,7 @@ func (d *lxc) Stop(stateful bool) error {
 		"stateful":  stateful}
 
 	if op.Action() == "stop" {
-		logger.Info("Stopping container", ctxMap)
+		d.common.logger.Info("Stopping container", ctxMap)
 	}
 
 	// Handle stateful stop
@@ -2541,11 +2543,11 @@ func (d *lxc) Stop(stateful bool) error {
 		d.stateful = true
 		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, true)
 		if err != nil {
-			logger.Error("Failed stopping container", ctxMap)
+			d.common.logger.Error("Failed stopping container", ctxMap)
 			return err
 		}
 
-		logger.Info("Stopped container", ctxMap)
+		d.common.logger.Info("Stopped container", ctxMap)
 		d.state.Events.SendLifecycle(d.project, "container-stopped",
 			fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
@@ -2607,7 +2609,7 @@ func (d *lxc) Stop(stateful bool) error {
 	}
 
 	if op.Action() == "stop" {
-		logger.Info("Stopped container", ctxMap)
+		d.common.logger.Info("Stopped container", ctxMap)
 		d.state.Events.SendLifecycle(d.project, "container-stopped",
 			fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 	}
@@ -2646,7 +2648,7 @@ func (d *lxc) Shutdown(timeout time.Duration) error {
 		"timeout":   timeout}
 
 	if op.Action() == "stop" {
-		logger.Info("Shutting down container", ctxMap)
+		d.common.logger.Info("Shutting down container", ctxMap)
 	}
 
 	// Load the go-lxc struct
@@ -2676,7 +2678,7 @@ func (d *lxc) Shutdown(timeout time.Duration) error {
 	}
 
 	if op.Action() == "stop" {
-		logger.Info("Shut down container", ctxMap)
+		d.common.logger.Info("Shut down container", ctxMap)
 		d.state.Events.SendLifecycle(d.project, "container-shutdown",
 			fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 	}
@@ -2695,14 +2697,14 @@ func (d *lxc) Restart(timeout time.Duration) error {
 		"used":      d.lastUsedDate,
 		"timeout":   timeout}
 
-	logger.Info("Restarting container", ctxMap)
+	d.common.logger.Info("Restarting container", ctxMap)
 
 	err := d.common.restart(d, timeout)
 	if err != nil {
 		return err
 	}
 
-	logger.Info("Restarted container", ctxMap)
+	d.common.logger.Info("Restarted container", ctxMap)
 	d.state.Events.SendLifecycle(d.project, "container-restarted",
 		fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
@@ -2717,7 +2719,7 @@ func (d *lxc) onStopNS(args map[string]string) error {
 
 	// Validate target.
 	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
-		logger.Error("Container sent invalid target to OnStopNS", log.Ctx{"container": d.Name(), "target": target})
+		d.common.logger.Error("Container sent invalid target to OnStopNS", log.Ctx{"container": d.Name(), "target": target})
 		return fmt.Errorf("Invalid stop target %q", target)
 	}
 
@@ -2735,7 +2737,7 @@ func (d *lxc) onStop(args map[string]string) error {
 
 	// Validate target
 	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
-		logger.Error("Container sent invalid target to OnStop", log.Ctx{"container": d.Name(), "target": target})
+		d.common.logger.Error("Container sent invalid target to OnStop", log.Ctx{"container": d.Name(), "target": target})
 		return fmt.Errorf("Invalid stop target: %s", target)
 	}
 
@@ -2766,7 +2768,7 @@ func (d *lxc) onStop(args map[string]string) error {
 		"stateful":  false}
 
 	if op == nil {
-		logger.Debug(fmt.Sprintf("Container initiated %s", target), ctxMap)
+		d.common.logger.Debug(fmt.Sprintf("Container initiated %s", target), ctxMap)
 	}
 
 	// Record power state
@@ -2786,7 +2788,7 @@ func (d *lxc) onStop(args map[string]string) error {
 
 		// Wait for other post-stop actions to be done and the container actually stopping.
 		d.IsRunning()
-		logger.Debug("Container stopped, cleaning up", log.Ctx{"container": d.Name()})
+		d.common.logger.Debug("Container stopped, cleaning up", log.Ctx{"container": d.Name()})
 
 		// Clean up devices.
 		d.cleanupDevices(false, "")
@@ -2834,7 +2836,7 @@ func (d *lxc) onStop(args map[string]string) error {
 
 		// Log and emit lifecycle if not user triggered
 		if op == nil {
-			logger.Info("Shut down container", ctxMap)
+			d.common.logger.Info("Shut down container", ctxMap)
 			d.state.Events.SendLifecycle(d.project, "container-shutdown", fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 		}
 
@@ -2883,7 +2885,7 @@ func (d *lxc) cleanupDevices(instanceRunning bool, stopHookNetnsPath string) {
 		if err == device.ErrUnsupportedDevType {
 			continue
 		} else if err != nil {
-			logger.Errorf("Failed to stop device %q: %v", dev.Name, err)
+			d.common.logger.Error(fmt.Sprintf("Failed to stop device %q: %v", dev.Name, err))
 		}
 	}
 }
@@ -2909,7 +2911,7 @@ func (d *lxc) Freeze() error {
 
 	// Check if the CGroup is available
 	if !d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
-		logger.Info("Unable to freeze container (lack of kernel support)", ctxMap)
+		d.common.logger.Info("Unable to freeze container (lack of kernel support)", ctxMap)
 		return nil
 	}
 
@@ -2918,24 +2920,24 @@ func (d *lxc) Freeze() error {
 		return fmt.Errorf("The container is already frozen")
 	}
 
-	logger.Info("Freezing container", ctxMap)
+	d.common.logger.Info("Freezing container", ctxMap)
 
 	// Load the go-lxc struct
 	err = d.initLXC(false)
 	if err != nil {
 		ctxMap["err"] = err
-		logger.Error("Failed freezing container", ctxMap)
+		d.common.logger.Error("Failed freezing container", ctxMap)
 		return err
 	}
 
 	err = d.c.Freeze()
 	if err != nil {
 		ctxMap["err"] = err
-		logger.Error("Failed freezing container", ctxMap)
+		d.common.logger.Error("Failed freezing container", ctxMap)
 		return err
 	}
 
-	logger.Info("Froze container", ctxMap)
+	d.common.logger.Info("Froze container", ctxMap)
 	d.state.Events.SendLifecycle(d.project, "container-paused",
 		fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
@@ -2963,7 +2965,7 @@ func (d *lxc) Unfreeze() error {
 
 	// Check if the CGroup is available
 	if !d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
-		logger.Info("Unable to unfreeze container (lack of kernel support)", ctxMap)
+		d.common.logger.Info("Unable to unfreeze container (lack of kernel support)", ctxMap)
 		return nil
 	}
 
@@ -2972,21 +2974,21 @@ func (d *lxc) Unfreeze() error {
 		return fmt.Errorf("The container is already running")
 	}
 
-	logger.Info("Unfreezing container", ctxMap)
+	d.common.logger.Info("Unfreezing container", ctxMap)
 
 	// Load the go-lxc struct
 	err = d.initLXC(false)
 	if err != nil {
-		logger.Error("Failed unfreezing container", ctxMap)
+		d.common.logger.Error("Failed unfreezing container", ctxMap)
 		return err
 	}
 
 	err = d.c.Unfreeze()
 	if err != nil {
-		logger.Error("Failed unfreezing container", ctxMap)
+		d.common.logger.Error("Failed unfreezing container", ctxMap)
 	}
 
-	logger.Info("Unfroze container", ctxMap)
+	d.common.logger.Info("Unfroze container", ctxMap)
 	d.state.Events.SendLifecycle(d.project, "container-resumed",
 		fmt.Sprintf("/1.0/containers/%s", d.name), nil)
 
@@ -3234,7 +3236,7 @@ func (d *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 		"used":      d.lastUsedDate,
 		"source":    sourceContainer.Name()}
 
-	logger.Info("Restoring container", ctxMap)
+	d.common.logger.Info("Restoring container", ctxMap)
 
 	// Initialize storage interface for the container and mount the rootfs for criu state check.
 	pool, err := storagePools.GetPoolByInstance(d.state, d)
@@ -3280,7 +3282,7 @@ func (d *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 	// Don't pass as user-requested as there's no way to fix a bad config.
 	err = d.Update(args, false)
 	if err != nil {
-		logger.Error("Failed restoring container configuration", ctxMap)
+		d.common.logger.Error("Failed restoring container configuration", ctxMap)
 		return err
 	}
 
@@ -3297,7 +3299,7 @@ func (d *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 			return fmt.Errorf("Stateful snapshot restore requested by snapshot is stateless")
 		}
 
-		logger.Debug("Performing stateful restore", ctxMap)
+		d.common.logger.Debug("Performing stateful restore", ctxMap)
 		d.stateful = true
 
 		criuMigrationArgs := instance.CriuMigrationArgs{
@@ -3319,16 +3321,16 @@ func (d *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 		// Remove the state from the parent container; we only keep this in snapshots.
 		err2 := os.RemoveAll(d.StatePath())
 		if err2 != nil {
-			logger.Error("Failed to delete snapshot state", log.Ctx{"path": d.StatePath(), "err": err2})
+			d.common.logger.Error("Failed to delete snapshot state", log.Ctx{"path": d.StatePath(), "err": err2})
 		}
 
 		if err != nil {
-			logger.Info("Failed restoring container", ctxMap)
+			d.common.logger.Info("Failed restoring container", ctxMap)
 			return err
 		}
 
-		logger.Debug("Performed stateful restore", ctxMap)
-		logger.Info("Restored container", ctxMap)
+		d.common.logger.Debug("Performed stateful restore", ctxMap)
+		d.common.logger.Info("Restored container", ctxMap)
 		return nil
 	}
 
@@ -3339,11 +3341,11 @@ func (d *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
 
 	// Restart the container.
 	if wasRunning {
-		logger.Info("Restored container", ctxMap)
+		d.common.logger.Info("Restored container", ctxMap)
 		return d.Start(false)
 	}
 
-	logger.Info("Restored container", ctxMap)
+	d.common.logger.Info("Restored container", ctxMap)
 	return nil
 }
 
@@ -3372,11 +3374,11 @@ func (d *lxc) Delete(force bool) error {
 		"ephemeral": d.ephemeral,
 		"used":      d.lastUsedDate}
 
-	logger.Info("Deleting container", ctxMap)
+	d.common.logger.Info("Deleting container", ctxMap)
 
 	if !force && shared.IsTrue(d.expandedConfig["security.protection.delete"]) && !d.IsSnapshot() {
 		err := fmt.Errorf("Container is protected")
-		logger.Warn("Failed to delete container", log.Ctx{"name": d.Name(), "err": err})
+		d.common.logger.Warn("Failed to delete container", log.Ctx{"name": d.Name(), "err": err})
 		return err
 	}
 
@@ -3409,7 +3411,7 @@ func (d *lxc) Delete(force bool) error {
 			// calling its Delete function.
 			err := instance.DeleteSnapshots(d.state, d.Project(), d.Name())
 			if err != nil {
-				logger.Error("Failed to delete instance snapshots", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
+				d.common.logger.Error("Failed to delete instance snapshots", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 				return err
 			}
 
@@ -3441,7 +3443,7 @@ func (d *lxc) Delete(force bool) error {
 		// Delete the MAAS entry.
 		err = d.maasDelete()
 		if err != nil {
-			logger.Error("Failed deleting container MAAS record", log.Ctx{"name": d.Name(), "err": err})
+			d.common.logger.Error("Failed deleting container MAAS record", log.Ctx{"name": d.Name(), "err": err})
 			return err
 		}
 
@@ -3459,11 +3461,11 @@ func (d *lxc) Delete(force bool) error {
 
 	// Remove the database record of the instance or snapshot instance.
 	if err := d.state.Cluster.DeleteInstance(d.project, d.Name()); err != nil {
-		logger.Error("Failed deleting container entry", log.Ctx{"name": d.Name(), "err": err})
+		d.common.logger.Error("Failed deleting container entry", log.Ctx{"name": d.Name(), "err": err})
 		return err
 	}
 
-	logger.Info("Deleted container", ctxMap)
+	d.common.logger.Info("Deleted container", ctxMap)
 
 	if d.IsSnapshot() {
 		d.state.Events.SendLifecycle(d.project, "container-snapshot-deleted",
@@ -3489,7 +3491,7 @@ func (d *lxc) Rename(newName string) error {
 		"used":      d.lastUsedDate,
 		"newname":   newName}
 
-	logger.Info("Renaming container", ctxMap)
+	d.common.logger.Info("Renaming container", ctxMap)
 
 	// Sanity checks.
 	err := instance.ValidName(newName, d.IsSnapshot())
@@ -3526,7 +3528,7 @@ func (d *lxc) Rename(newName string) error {
 		// Rename all the instance snapshot database entries.
 		results, err := d.state.Cluster.GetInstanceSnapshotsNames(d.project, oldName)
 		if err != nil {
-			logger.Error("Failed to get container snapshots", ctxMap)
+			d.common.logger.Error("Failed to get container snapshots", ctxMap)
 			return err
 		}
 
@@ -3538,7 +3540,7 @@ func (d *lxc) Rename(newName string) error {
 				return tx.RenameInstanceSnapshot(d.project, oldName, oldSnapName, baseSnapName)
 			})
 			if err != nil {
-				logger.Error("Failed renaming snapshot", ctxMap)
+				d.common.logger.Error("Failed renaming snapshot", ctxMap)
 				return err
 			}
 		}
@@ -3555,7 +3557,7 @@ func (d *lxc) Rename(newName string) error {
 		return tx.RenameInstance(d.project, oldName, newName)
 	})
 	if err != nil {
-		logger.Error("Failed renaming container", ctxMap)
+		d.common.logger.Error("Failed renaming container", ctxMap)
 		return err
 	}
 
@@ -3565,7 +3567,7 @@ func (d *lxc) Rename(newName string) error {
 	if shared.PathExists(d.LogPath()) {
 		err := os.Rename(d.LogPath(), shared.LogPath(newFullName))
 		if err != nil {
-			logger.Error("Failed renaming container", ctxMap)
+			d.common.logger.Error("Failed renaming container", ctxMap)
 			return err
 		}
 	}
@@ -3621,7 +3623,7 @@ func (d *lxc) Rename(newName string) error {
 		return err
 	}
 
-	logger.Info("Renamed container", ctxMap)
+	d.common.logger.Info("Renamed container", ctxMap)
 
 	if d.IsSnapshot() {
 		d.state.Events.SendLifecycle(d.project, "container-snapshot-renamed",
@@ -4410,7 +4412,7 @@ func (d *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices devic
 
 			// If update is non-user requested (i.e. from a snapshot restore), there's nothing we can
 			// do to fix the config and we don't want to prevent the snapshot restore so log and allow.
-			logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"project": d.Project(), "instance": d.Name(), "device": dev.Name, "err": err})
+			d.common.logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"project": d.Project(), "instance": d.Name(), "device": dev.Name, "err": err})
 			continue
 		}
 
@@ -4447,12 +4449,12 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 		return meta, fmt.Errorf("Cannot export a running instance as an image")
 	}
 
-	logger.Info("Exporting instance", ctxMap)
+	d.common.logger.Info("Exporting instance", ctxMap)
 
 	// Start the storage.
 	_, err := d.mount()
 	if err != nil {
-		logger.Error("Failed exporting instance", ctxMap)
+		d.common.logger.Error("Failed exporting instance", ctxMap)
 		return meta, err
 	}
 	defer d.unmount()
@@ -4460,7 +4462,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 	// Get IDMap to unshift container as the tarball is created.
 	idmap, err := d.DiskIdmap()
 	if err != nil {
-		logger.Error("Failed exporting instance", ctxMap)
+		d.common.logger.Error("Failed exporting instance", ctxMap)
 		return meta, err
 	}
 
@@ -4480,7 +4482,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 
 		err = tarWriter.WriteFile(path[offset:], path, fi, false)
 		if err != nil {
-			logger.Debugf("Error tarring up %s: %s", path, err)
+			d.common.logger.Debug(fmt.Sprintf("Error tarring up %s: %s", path, err))
 			return err
 		}
 		return nil
@@ -4493,7 +4495,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 		tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 		defer os.RemoveAll(tempDir)
@@ -4505,7 +4507,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 			parent, err := instance.LoadByProjectAndName(d.state, d.project, parentName)
 			if err != nil {
 				tarWriter.Close()
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 
@@ -4517,7 +4519,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 		if arch == "" {
 			arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
 			if err != nil {
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 		}
@@ -4530,7 +4532,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 		data, err := yaml.Marshal(&meta)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
@@ -4539,22 +4541,22 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 		err = ioutil.WriteFile(fnam, data, 0644)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
 		fi, err := os.Lstat(fnam)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
 		tmpOffset := len(path.Dir(fnam)) + 1
 		if err := tarWriter.WriteFile(fnam[tmpOffset:], fnam, fi, false); err != nil {
 			tarWriter.Close()
-			logger.Debugf("Error writing to tarfile: %s", err)
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Debug(fmt.Sprintf("Error writing to tarfile: %s", err))
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 	} else {
@@ -4562,14 +4564,14 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 		content, err := ioutil.ReadFile(fnam)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
 		err = yaml.Unmarshal(content, &meta)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
@@ -4580,7 +4582,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 			tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
 			if err != nil {
 				tarWriter.Close()
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 			defer os.RemoveAll(tempDir)
@@ -4588,7 +4590,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 			data, err := yaml.Marshal(&meta)
 			if err != nil {
 				tarWriter.Close()
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 
@@ -4597,7 +4599,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 			err = ioutil.WriteFile(fnam, data, 0644)
 			if err != nil {
 				tarWriter.Close()
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 		}
@@ -4606,8 +4608,8 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 		fi, err := os.Lstat(fnam)
 		if err != nil {
 			tarWriter.Close()
-			logger.Debugf("Error statting %s during export", fnam)
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Debug(fmt.Sprintf("Error statting %s during export", fnam))
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
@@ -4619,8 +4621,8 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 		}
 		if err != nil {
 			tarWriter.Close()
-			logger.Debugf("Error writing to tarfile: %s", err)
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Debug(fmt.Sprintf("Error writing to tarfile: %s", err))
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 	}
@@ -4629,7 +4631,7 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 	fnam = d.RootfsPath()
 	err = filepath.Walk(fnam, writeToTar)
 	if err != nil {
-		logger.Error("Failed exporting instance", ctxMap)
+		d.common.logger.Error("Failed exporting instance", ctxMap)
 		return meta, err
 	}
 
@@ -4638,18 +4640,18 @@ func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetada
 	if shared.PathExists(fnam) {
 		err = filepath.Walk(fnam, writeToTar)
 		if err != nil {
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 	}
 
 	err = tarWriter.Close()
 	if err != nil {
-		logger.Error("Failed exporting instance", ctxMap)
+		d.common.logger.Error("Failed exporting instance", ctxMap)
 		return meta, err
 	}
 
-	logger.Info("Exported instance", ctxMap)
+	d.common.logger.Info("Exported instance", ctxMap)
 	return meta, nil
 }
 
@@ -4698,7 +4700,7 @@ func (d *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 		return fmt.Errorf("Unable to perform container live migration. CRIU isn't installed")
 	}
 
-	logger.Info("Migrating container", ctxMap)
+	d.common.logger.Info("Migrating container", ctxMap)
 
 	prettyCmd := ""
 	switch args.Cmd {
@@ -4712,7 +4714,7 @@ func (d *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 		prettyCmd = "feature-check"
 	default:
 		prettyCmd = "unknown"
-		logger.Warn("Unknown migrate call", log.Ctx{"cmd": args.Cmd})
+		d.common.logger.Warn("Unknown migrate call", log.Ctx{"cmd": args.Cmd})
 	}
 
 	pool, err := d.getStoragePool()
@@ -4807,7 +4809,7 @@ func (d *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 		}
 		migrateErr = d.c.Migrate(args.Cmd, opts)
 		if migrateErr != nil {
-			logger.Info("CRIU feature check failed", ctxMap)
+			d.common.logger.Info("CRIU feature check failed", ctxMap)
 			return migrateErr
 		}
 		return nil
@@ -4856,20 +4858,20 @@ func (d *lxc) Migrate(args *instance.CriuMigrationArgs) error {
 
 	collectErr := collectCRIULogFile(d, finalStateDir, args.Function, prettyCmd)
 	if collectErr != nil {
-		logger.Error("Error collecting checkpoint log file", log.Ctx{"err": collectErr})
+		d.common.logger.Error("Error collecting checkpoint log file", log.Ctx{"err": collectErr})
 	}
 
 	if migrateErr != nil {
 		log, err2 := getCRIULogErrors(finalStateDir, prettyCmd)
 		if err2 == nil {
-			logger.Info("Failed migrating container", ctxMap)
+			d.common.logger.Info("Failed migrating container", ctxMap)
 			migrateErr = fmt.Errorf("%s %s failed\n%s", args.Function, prettyCmd, log)
 		}
 
 		return migrateErr
 	}
 
-	logger.Info("Migrated container", ctxMap)
+	d.common.logger.Info("Migrated container", ctxMap)
 
 	return nil
 }
@@ -5075,7 +5077,7 @@ func (d *lxc) FileExists(path string) error {
 		}
 
 		for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
-			logger.Debugf("forkcheckfile: %s", line)
+			d.common.logger.Debug(fmt.Sprintf("forkcheckfile: %s", line))
 		}
 	}
 
@@ -5187,7 +5189,7 @@ func (d *lxc) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMod
 			continue
 		}
 
-		logger.Debugf("forkgetfile: %s", line)
+		d.common.logger.Debug(fmt.Sprintf("forkgetfile: %s", line))
 	}
 
 	if err != nil {
@@ -5507,10 +5509,10 @@ func (d *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, st
 
 	attachedPid := shared.ReadPid(rStatus)
 	if attachedPid <= 0 {
-		logger.Errorf("Failed to retrieve PID of executing child process")
+		d.common.logger.Error("Failed to retrieve PID of executing child process")
 		return nil, fmt.Errorf("Failed to retrieve PID of executing child process")
 	}
-	logger.Debugf("Retrieved PID %d of executing child process", attachedPid)
+	d.common.logger.Debug(fmt.Sprintf("Retrieved PID %d of executing child process", attachedPid))
 
 	instCmd := &lxcCmd{
 		cmd:              &cmd,
@@ -5557,28 +5559,28 @@ func (d *lxc) diskState() map[string]api.InstanceStateDisk {
 		if dev.Config["path"] == "/" {
 			pool, err := storagePools.GetPoolByInstance(d.state, d)
 			if err != nil {
-				logger.Error("Error loading storage pool", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
+				d.common.logger.Error("Error loading storage pool", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 				continue
 			}
 
 			usage, err = pool.GetInstanceUsage(d)
 			if err != nil {
 				if err != storageDrivers.ErrNotSupported {
-					logger.Error("Error getting disk usage", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
+					d.common.logger.Error("Error getting disk usage", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 				}
 				continue
 			}
 		} else if dev.Config["pool"] != "" {
 			pool, err := storagePools.GetPoolByName(d.state, dev.Config["pool"])
 			if err != nil {
-				logger.Error("Error loading storage pool", log.Ctx{"project": d.Project(), "poolName": dev.Config["pool"], "err": err})
+				d.common.logger.Error("Error loading storage pool", log.Ctx{"project": d.Project(), "poolName": dev.Config["pool"], "err": err})
 				continue
 			}
 
 			usage, err = pool.GetCustomVolumeUsage(d.Project(), dev.Config["source"])
 			if err != nil {
 				if err != storageDrivers.ErrNotSupported {
-					logger.Error("Error getting volume usage", log.Ctx{"project": d.Project(), "volume": dev.Config["source"], "err": err})
+					d.common.logger.Error("Error getting volume usage", log.Ctx{"project": d.Project(), "volume": dev.Config["source"], "err": err})
 				}
 				continue
 			}
@@ -5651,7 +5653,7 @@ func (d *lxc) networkState() map[string]api.InstanceStateNetwork {
 		nw, err := netutils.NetnsGetifaddrs(int32(pid))
 		if err != nil {
 			couldUseNetnsGetifaddrs = false
-			logger.Error("Failed to retrieve network information via netlink", log.Ctx{"container": d.name, "pid": pid})
+			d.common.logger.Error("Failed to retrieve network information via netlink", log.Ctx{"container": d.name, "pid": pid})
 		} else {
 			result = nw
 		}
@@ -5676,7 +5678,7 @@ func (d *lxc) networkState() map[string]api.InstanceStateNetwork {
 
 		// Process forkgetnet response
 		if err != nil {
-			logger.Error("Error calling 'lxd forknet", log.Ctx{"container": d.name, "err": err, "pid": pid})
+			d.common.logger.Error("Error calling 'lxd forknet", log.Ctx{"container": d.name, "err": err, "pid": pid})
 			return result
 		}
 
@@ -5688,7 +5690,7 @@ func (d *lxc) networkState() map[string]api.InstanceStateNetwork {
 		nw := map[string]api.InstanceStateNetwork{}
 		err = json.Unmarshal([]byte(out), &nw)
 		if err != nil {
-			logger.Error("Failure to read forknet json", log.Ctx{"container": d.name, "err": err})
+			d.common.logger.Error("Failure to read forknet json", log.Ctx{"container": d.name, "err": err})
 			return result
 		}
 		result = nw
@@ -6072,7 +6074,7 @@ func (d *lxc) removeUnixDevices() error {
 		devicePath := filepath.Join(d.DevicesPath(), f.Name())
 		err := os.Remove(devicePath)
 		if err != nil {
-			logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
+			d.common.logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
 		}
 	}
 
@@ -6271,7 +6273,7 @@ func (d *lxc) removeDiskDevices() error {
 		diskPath := filepath.Join(d.DevicesPath(), f.Name())
 		err := os.Remove(diskPath)
 		if err != nil {
-			logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
+			d.common.logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
 		}
 	}
 
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 13bae74f7e..64001ae2ef 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -113,6 +113,7 @@ func qemuInstantiate(s *state.State, args db.InstanceArgs, expandedDevices devic
 			lastUsedDate: args.LastUsedDate,
 			localConfig:  args.Config,
 			localDevices: args.Devices,
+			logger:       logging.AddContext(logger.Log, log.Ctx{"instanceType": args.Type.String(), "instanceName": args.Name}),
 			name:         args.Name,
 			node:         args.Node,
 			profiles:     args.Profiles,
@@ -166,6 +167,7 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 			lastUsedDate: args.LastUsedDate,
 			localConfig:  args.Config,
 			localDevices: args.Devices,
+			logger:       logging.AddContext(logger.Log, log.Ctx{"instanceType": args.Type.String(), "instanceName": args.Name}),
 			name:         args.Name,
 			node:         args.Node,
 			profiles:     args.Profiles,
@@ -207,7 +209,7 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 		"ephemeral": d.ephemeral,
 	}
 
-	logger.Info("Creating instance", ctxMap)
+	d.common.logger.Info("Creating instance", ctxMap)
 
 	// Load the config.
 	err = d.init()
@@ -290,7 +292,7 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
 		}
 	}
 
-	logger.Info("Created instance", ctxMap)
+	d.common.logger.Info("Created instance", ctxMap)
 	d.state.Events.SendLifecycle(d.project, "virtual-machine-created", fmt.Sprintf("/1.0/virtual-machines/%s", d.name), nil)
 
 	revert.Success()
@@ -358,7 +360,7 @@ func (d *qemu) getMonitorEventHandler() func(event string, data map[string]inter
 
 		inst, err := instance.LoadByProjectAndName(state, projectName, instanceName)
 		if err != nil {
-			logger.Error("Failed to load instance", "project", projectName, "instance", instanceName, "err", err)
+			d.common.logger.Error("Failed to load instance", "project", projectName, "instance", instanceName, "err", err)
 			return
 		}
 
@@ -371,7 +373,7 @@ func (d *qemu) getMonitorEventHandler() func(event string, data map[string]inter
 
 			err = inst.(*qemu).onStop(target)
 			if err != nil {
-				logger.Error("Failed to cleanly stop instance", "project", projectName, "instance", instanceName, "err", err)
+				d.common.logger.Error("Failed to cleanly stop instance", "project", projectName, "instance", instanceName, "err", err)
 				return
 			}
 		}
@@ -790,7 +792,7 @@ func (d *qemu) Start(stateful bool) error {
 			return err
 		}
 	} else {
-		logger.Warn("Unable to use virtio-fs for config drive, using 9p as a fallback: virtiofsd missing")
+		d.common.logger.Warn("Unable to use virtio-fs for config drive, using 9p as a fallback: virtiofsd missing")
 	}
 
 	// Generate UUID if not present.
@@ -830,7 +832,7 @@ func (d *qemu) Start(stateful bool) error {
 		revert.Add(func() {
 			err := d.deviceStop(dev.Name, dev.Config, false)
 			if err != nil {
-				logger.Errorf("Failed to cleanup device %q: %v", dev.Name, err)
+				d.common.logger.Error(fmt.Sprintf("Failed to cleanup device %q: %v", dev.Name, err))
 			}
 		})
 
@@ -1017,7 +1019,7 @@ func (d *qemu) Start(stateful bool) error {
 
 	pid, err := d.pid()
 	if err != nil {
-		logger.Errorf(`Failed to get VM process ID "%d"`, pid)
+		d.common.logger.Error(fmt.Sprintf(`Failed to get VM process ID "%d"`, pid))
 		op.Done(err)
 		return err
 	}
@@ -1025,13 +1027,13 @@ func (d *qemu) Start(stateful bool) error {
 	revert.Add(func() {
 		proc, err := os.FindProcess(pid)
 		if err != nil {
-			logger.Errorf(`Failed to find VM process "%d"`, pid)
+			d.common.logger.Error(fmt.Sprintf(`Failed to find VM process "%d"`, pid))
 			return
 		}
 
 		proc.Kill()
 		if err != nil {
-			logger.Errorf(`Failed to kill VM process "%d"`, pid)
+			d.common.logger.Error(fmt.Sprintf(`Failed to kill VM process "%d"`, pid))
 		}
 	})
 
@@ -1216,14 +1218,14 @@ func (d *qemu) RegisterDevices() {
 		}
 
 		if err != nil {
-			logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
+			d.common.logger.Error("Failed to load device to register", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
 			continue
 		}
 
 		// Check whether device wants to register for any events.
 		err = dev.Register()
 		if err != nil {
-			logger.Error("Failed to register device", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
+			d.common.logger.Error("Failed to register device", log.Ctx{"err": err, "instance": d.Name(), "device": entry.Name})
 			continue
 		}
 	}
@@ -1401,7 +1403,7 @@ func (d *qemu) generateConfigShare() error {
 	// Add the VM agent.
 	path, err := exec.LookPath("lxd-agent")
 	if err != nil {
-		logger.Warnf("lxd-agent not found, skipping its inclusion in the VM config drive: %v", err)
+		d.common.logger.Warn(fmt.Sprintf("lxd-agent not found, skipping its inclusion in the VM config drive: %v", err))
 	} else {
 		// Install agent into config drive dir if found.
 		path, err = filepath.EvalSymlinks(path)
@@ -2272,7 +2274,7 @@ func (d *qemu) addDriveConfig(sb *strings.Builder, bootIndexes map[string]int, d
 
 	// If drive config indicates we need to use unsafe I/O then use it.
 	if shared.StringInSlice(qemuUnsafeIO, driveConf.Opts) {
-		logger.Warnf("Using unsafe cache I/O with %s", driveConf.DevPath)
+		d.common.logger.Warn(fmt.Sprintf("Using unsafe cache I/O with %s", driveConf.DevPath))
 		aioMode = "threads"
 		cacheMode = "unsafe" // Use host cache, but ignore all sync requests from guest.
 	} else if shared.PathExists(driveConf.DevPath) && !shared.IsBlockdevPath(driveConf.DevPath) {
@@ -2287,7 +2289,7 @@ func (d *qemu) addDriveConfig(sb *strings.Builder, bootIndexes map[string]int, d
 		if fsType == "zfs" || fsType == "btrfs" {
 			if driveConf.FSType != "iso9660" {
 				// Only warn about using writeback cache if the drive image is writable.
-				logger.Warnf("Using writeback cache I/O with %q as backing filesystem is %q", driveConf.DevPath, fsType)
+				d.common.logger.Warn(fmt.Sprintf("Using writeback cache I/O with %q as backing filesystem is %q", driveConf.DevPath, fsType))
 			}
 
 			aioMode = "threads"
@@ -2683,7 +2685,7 @@ func (d *qemu) Restore(source instance.Instance, stateful bool) error {
 		"used":      d.lastUsedDate,
 		"source":    source.Name()}
 
-	logger.Info("Restoring instance", ctxMap)
+	d.common.logger.Info("Restoring instance", ctxMap)
 
 	// Load the storage driver.
 	pool, err := storagePools.GetPoolByInstance(d.state, d)
@@ -2720,7 +2722,7 @@ func (d *qemu) Restore(source instance.Instance, stateful bool) error {
 	// Don't pass as user-requested as there's no way to fix a bad config.
 	err = d.Update(args, false)
 	if err != nil {
-		logger.Error("Failed restoring instance configuration", ctxMap)
+		d.common.logger.Error("Failed restoring instance configuration", ctxMap)
 		return err
 	}
 
@@ -2735,11 +2737,11 @@ func (d *qemu) Restore(source instance.Instance, stateful bool) error {
 
 	// Restart the instance.
 	if wasRunning {
-		logger.Info("Restored instance", ctxMap)
+		d.common.logger.Info("Restored instance", ctxMap)
 		return d.Start(false)
 	}
 
-	logger.Info("Restored instance", ctxMap)
+	d.common.logger.Info("Restored instance", ctxMap)
 	return nil
 }
 
@@ -2754,7 +2756,7 @@ func (d *qemu) Rename(newName string) error {
 		"used":      d.lastUsedDate,
 		"newname":   newName}
 
-	logger.Info("Renaming instance", ctxMap)
+	d.common.logger.Info("Renaming instance", ctxMap)
 
 	// Sanity checks.
 	err := instance.ValidName(newName, d.IsSnapshot())
@@ -2791,7 +2793,7 @@ func (d *qemu) Rename(newName string) error {
 		// Rename all the instance snapshot database entries.
 		results, err := d.state.Cluster.GetInstanceSnapshotsNames(d.project, oldName)
 		if err != nil {
-			logger.Error("Failed to get instance snapshots", ctxMap)
+			d.common.logger.Error("Failed to get instance snapshots", ctxMap)
 			return err
 		}
 
@@ -2803,7 +2805,7 @@ func (d *qemu) Rename(newName string) error {
 				return tx.RenameInstanceSnapshot(d.project, oldName, oldSnapName, baseSnapName)
 			})
 			if err != nil {
-				logger.Error("Failed renaming snapshot", ctxMap)
+				d.common.logger.Error("Failed renaming snapshot", ctxMap)
 				return err
 			}
 		}
@@ -2820,7 +2822,7 @@ func (d *qemu) Rename(newName string) error {
 		return tx.RenameInstance(d.project, oldName, newName)
 	})
 	if err != nil {
-		logger.Error("Failed renaming instance", ctxMap)
+		d.common.logger.Error("Failed renaming instance", ctxMap)
 		return err
 	}
 
@@ -2830,7 +2832,7 @@ func (d *qemu) Rename(newName string) error {
 	if shared.PathExists(d.LogPath()) {
 		err := os.Rename(d.LogPath(), shared.LogPath(newFullName))
 		if err != nil {
-			logger.Error("Failed renaming instance", ctxMap)
+			d.common.logger.Error("Failed renaming instance", ctxMap)
 			return err
 		}
 	}
@@ -2878,7 +2880,7 @@ func (d *qemu) Rename(newName string) error {
 		return err
 	}
 
-	logger.Info("Renamed instance", ctxMap)
+	d.common.logger.Info("Renamed instance", ctxMap)
 
 	if d.IsSnapshot() {
 		d.state.Events.SendLifecycle(d.project, "virtual-machine-snapshot-renamed",
@@ -3344,7 +3346,7 @@ func (d *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices devi
 
 			// If update is non-user requested (i.e. from a snapshot restore), there's nothing we can
 			// do to fix the config and we don't want to prevent the snapshot restore so log and allow.
-			logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"project": d.Project(), "instance": d.Name(), "device": dev.Name, "err": err})
+			d.common.logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"project": d.Project(), "instance": d.Name(), "device": dev.Name, "err": err})
 			continue
 		}
 
@@ -3452,7 +3454,7 @@ func (d *qemu) removeUnixDevices() error {
 		devicePath := filepath.Join(d.DevicesPath(), f.Name())
 		err := os.Remove(devicePath)
 		if err != nil {
-			logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
+			d.common.logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
 		}
 	}
 
@@ -3485,7 +3487,7 @@ func (d *qemu) removeDiskDevices() error {
 		diskPath := filepath.Join(d.DevicesPath(), f.Name())
 		err := os.Remove(diskPath)
 		if err != nil {
-			logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
+			d.common.logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
 		}
 	}
 
@@ -3515,7 +3517,7 @@ func (d *qemu) cleanupDevices() {
 		if err == device.ErrUnsupportedDevType {
 			continue
 		} else if err != nil {
-			logger.Errorf("Failed to stop device '%s': %v", dev.Name, err)
+			d.common.logger.Error(fmt.Sprintf("Failed to stop device '%s': %v", dev.Name, err))
 		}
 	}
 }
@@ -3544,7 +3546,7 @@ func (d *qemu) Delete(force bool) error {
 		"ephemeral": d.ephemeral,
 		"used":      d.lastUsedDate}
 
-	logger.Info("Deleting instance", ctxMap)
+	d.common.logger.Info("Deleting instance", ctxMap)
 
 	// Check if instance is delete protected.
 	if !force && shared.IsTrue(d.expandedConfig["security.protection.delete"]) && !d.IsSnapshot() {
@@ -3604,7 +3606,7 @@ func (d *qemu) Delete(force bool) error {
 		// Delete the MAAS entry.
 		err = d.maasDelete()
 		if err != nil {
-			logger.Error("Failed deleting instance MAAS record", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
+			d.common.logger.Error("Failed deleting instance MAAS record", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 			return err
 		}
 
@@ -3622,11 +3624,11 @@ func (d *qemu) Delete(force bool) error {
 
 	// Remove the database record of the instance or snapshot instance.
 	if err := d.state.Cluster.DeleteInstance(d.Project(), d.Name()); err != nil {
-		logger.Error("Failed deleting instance entry", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
+		d.common.logger.Error("Failed deleting instance entry", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 		return err
 	}
 
-	logger.Info("Deleted instance", ctxMap)
+	d.common.logger.Info("Deleted instance", ctxMap)
 
 	if d.IsSnapshot() {
 		d.state.Events.SendLifecycle(d.project, "virtual-machine-snapshot-deleted",
@@ -3698,12 +3700,12 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 		return meta, fmt.Errorf("Cannot export a running instance as an image")
 	}
 
-	logger.Info("Exporting instance", ctxMap)
+	d.common.logger.Info("Exporting instance", ctxMap)
 
 	// Start the storage.
 	mountInfo, err := d.mount()
 	if err != nil {
-		logger.Error("Failed exporting instance", ctxMap)
+		d.common.logger.Error("Failed exporting instance", ctxMap)
 		return meta, err
 	}
 	defer d.unmount()
@@ -3722,7 +3724,7 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 
 		err = tarWriter.WriteFile(path[offset:], path, fi, false)
 		if err != nil {
-			logger.Debugf("Error tarring up %s: %s", path, err)
+			d.common.logger.Debug(fmt.Sprintf("Error tarring up %s: %s", path, err))
 			return err
 		}
 
@@ -3736,7 +3738,7 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 		tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 		defer os.RemoveAll(tempDir)
@@ -3748,7 +3750,7 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 			parent, err := instance.LoadByProjectAndName(d.state, d.project, parentName)
 			if err != nil {
 				tarWriter.Close()
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 
@@ -3760,7 +3762,7 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 		if arch == "" {
 			arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
 			if err != nil {
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 		}
@@ -3773,7 +3775,7 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 		data, err := yaml.Marshal(&meta)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
@@ -3782,21 +3784,21 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 		err = ioutil.WriteFile(fnam, data, 0644)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
 		fi, err := os.Lstat(fnam)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
 		tmpOffset := len(filepath.Dir(fnam)) + 1
 		if err := tarWriter.WriteFile(fnam[tmpOffset:], fnam, fi, false); err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 	} else {
@@ -3804,14 +3806,14 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 		content, err := ioutil.ReadFile(fnam)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
 		err = yaml.Unmarshal(content, &meta)
 		if err != nil {
 			tarWriter.Close()
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
@@ -3822,7 +3824,7 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 			tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
 			if err != nil {
 				tarWriter.Close()
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 			defer os.RemoveAll(tempDir)
@@ -3830,7 +3832,7 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 			data, err := yaml.Marshal(&meta)
 			if err != nil {
 				tarWriter.Close()
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 
@@ -3839,7 +3841,7 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 			err = ioutil.WriteFile(fnam, data, 0644)
 			if err != nil {
 				tarWriter.Close()
-				logger.Error("Failed exporting instance", ctxMap)
+				d.common.logger.Error("Failed exporting instance", ctxMap)
 				return meta, err
 			}
 		}
@@ -3848,8 +3850,8 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 		fi, err := os.Lstat(fnam)
 		if err != nil {
 			tarWriter.Close()
-			logger.Debugf("Error statting %s during export", fnam)
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Debug(fmt.Sprintf("Error statting %s during export", fnam))
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 
@@ -3861,8 +3863,8 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 		}
 		if err != nil {
 			tarWriter.Close()
-			logger.Debugf("Error writing to tarfile: %s", err)
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Debug(fmt.Sprintf("Error writing to tarfile: %s", err))
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 	}
@@ -3900,18 +3902,18 @@ func (d *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetad
 	if shared.PathExists(fnam) {
 		err = filepath.Walk(fnam, writeToTar)
 		if err != nil {
-			logger.Error("Failed exporting instance", ctxMap)
+			d.common.logger.Error("Failed exporting instance", ctxMap)
 			return meta, err
 		}
 	}
 
 	err = tarWriter.Close()
 	if err != nil {
-		logger.Error("Failed exporting instance", ctxMap)
+		d.common.logger.Error("Failed exporting instance", ctxMap)
 		return meta, err
 	}
 
-	logger.Info("Exported instance", ctxMap)
+	d.common.logger.Info("Exported instance", ctxMap)
 	return meta, nil
 }
 
@@ -3939,7 +3941,7 @@ func (d *qemu) FilePull(srcPath string, dstPath string) (int64, int64, os.FileMo
 
 	agent, err := lxdClient.ConnectLXDHTTP(nil, client)
 	if err != nil {
-		logger.Errorf("Failed to connect to lxd-agent on %s: %v", d.Name(), err)
+		d.common.logger.Error(fmt.Sprintf("Failed to connect to lxd-agent on %s: %v", d.Name(), err))
 		return 0, 0, 0, "", nil, fmt.Errorf("Failed to connect to lxd-agent")
 	}
 	defer agent.Disconnect()
@@ -3983,7 +3985,7 @@ func (d *qemu) FilePush(fileType string, srcPath string, dstPath string, uid int
 
 	agent, err := lxdClient.ConnectLXDHTTP(nil, client)
 	if err != nil {
-		logger.Errorf("Failed to connect to lxd-agent on %s: %v", d.Name(), err)
+		d.common.logger.Error(fmt.Sprintf("Failed to connect to lxd-agent on %s: %v", d.Name(), err))
 		return fmt.Errorf("Failed to connect to lxd-agent")
 	}
 	defer agent.Disconnect()
@@ -4124,7 +4126,7 @@ func (d *qemu) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, s
 
 	agent, err := lxdClient.ConnectLXDHTTP(nil, client)
 	if err != nil {
-		logger.Errorf("Failed to connect to lxd-agent on %s: %v", d.Name(), err)
+		d.common.logger.Error(fmt.Sprintf("Failed to connect to lxd-agent on %s: %v", d.Name(), err))
 		return nil, fmt.Errorf("Failed to connect to lxd-agent")
 	}
 	revert.Add(agent.Disconnect)
@@ -4322,7 +4324,7 @@ func (d *qemu) RenderState() (*api.InstanceState, error) {
 		status, err = d.agentGetState()
 		if err != nil {
 			if err != errQemuAgentOffline {
-				logger.Warn("Could not get VM state from agent", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
+				d.common.logger.Warn("Could not get VM state from agent", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 			}
 
 			// Fallback data if agent is not reachable.
@@ -4336,7 +4338,7 @@ func (d *qemu) RenderState() (*api.InstanceState, error) {
 
 				dev, _, err := d.deviceLoad(k, m)
 				if err != nil {
-					logger.Warn("Could not load device", log.Ctx{"project": d.Project(), "instance": d.Name(), "device": k, "err": err})
+					d.common.logger.Warn("Could not load device", log.Ctx{"project": d.Project(), "instance": d.Name(), "device": k, "err": err})
 					continue
 				}
 
@@ -4390,7 +4392,7 @@ func (d *qemu) RenderState() (*api.InstanceState, error) {
 	status.StatusCode = statusCode
 	status.Disk, err = d.diskState()
 	if err != nil && err != storageDrivers.ErrNotSupported {
-		logger.Warn("Error getting disk usage", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
+		d.common.logger.Warn("Error getting disk usage", log.Ctx{"project": d.Project(), "instance": d.Name(), "err": err})
 	}
 
 	return status, nil
@@ -4931,7 +4933,7 @@ func (d *qemu) devlxdEventSend(eventType string, eventMessage interface{}) error
 
 	agent, err := lxdClient.ConnectLXDHTTP(nil, client)
 	if err != nil {
-		logger.Errorf("Failed to connect to lxd-agent on %s: %v", d.Name(), err)
+		d.common.logger.Error(fmt.Sprintf("Failed to connect to lxd-agent on %s: %v", d.Name(), err))
 		return fmt.Errorf("Failed to connect to lxd-agent")
 	}
 	defer agent.Disconnect()
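
A note on the mechanical rewrites above: logger.Debugf/Warnf/Errorf are
package-level helper functions, while the new per-struct field is a
logger.Logger value that only exposes level methods taking a message plus
optional context, so formatted messages are now built with fmt.Sprintf
before the call. A simplified sketch of the interface shape the call
sites rely on (the real interface in lxd/shared/logger may carry more
methods and levels than shown here):

    // Logger is the shape this patch programs against; a simplification
    // of lxd/shared/logger.Logger, shown for orientation only.
    type Logger interface {
        Debug(msg string, ctx ...interface{})
        Info(msg string, ctx ...interface{})
        Warn(msg string, ctx ...interface{})
        Error(msg string, ctx ...interface{})
    }

    // After the patch, a formatted message goes through fmt.Sprintf:
    //   d.common.logger.Error(fmt.Sprintf("Failed to stop device %q: %v", dev.Name, err))
    // while structured context keeps using log.Ctx:
    //   d.common.logger.Error("Failed to register device", log.Ctx{"err": err, "device": entry.Name})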

