[lxc-devel] [lxd/master] Bugfixes

stgraber on Github lxc-bot at linuxcontainers.org
Thu Feb 23 07:06:44 UTC 2017


From 4ff0d95761b016365f6f4d03af6126871a0c4624 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 23 Feb 2017 00:56:38 -0500
Subject: [PATCH 1/2] tests: Fix mixed tabs/spaces
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 test/suites/storage_profiles.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/suites/storage_profiles.sh b/test/suites/storage_profiles.sh
index da27169..37c2f8c 100644
--- a/test/suites/storage_profiles.sh
+++ b/test/suites/storage_profiles.sh
@@ -13,12 +13,12 @@ test_storage_profiles() {
 
     HAS_ZFS="dir"
     if which zfs >/dev/null 2>&1; then
-	    HAS_ZFS="zfs"
+      HAS_ZFS="zfs"
     fi
 
     HAS_BTRFS="dir"
     if which zfs >/dev/null 2>&1; then
-	    HAS_BTRFS="btrfs"
+      HAS_BTRFS="btrfs"
     fi
 
     # shellcheck disable=SC1009

From 894b355fbf1d17dcd8a562d0e45435c71977c077 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 23 Feb 2017 01:46:45 -0500
Subject: [PATCH 2/2] Cleanup root device validation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

A few rules (see the short example after the list):
 - A container may only have a single root device.

 - A profile may only have a single root device.

 - A container can rely on profiles for its root device, but the first
   rule still applies.

 - Removing a root device from a profile that a container depends upon
   isn't allowed.

 - Adding a root device to a profile that a container uses, under a
   different name than the container's existing root device, is allowed
   at the profile level but not at the container level: the profile is
   saved, but the affected container will no longer start.
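
As a rough sketch of the first and fourth rules, reusing names from the
test suite (a profile "dummy" whose root disk device is named "rootfs"
and a container "c1" launched with "-p dummy") plus an assumed pool
called "default":

  # c1 already gets its root disk from the profile, so giving the
  # container a second root disk device under another name is rejected.
  ! lxc config device add c1 root disk path="/" pool="default"

  # Removing the profile's root device while c1 still depends on it is
  # rejected as well.
  ! lxc profile device remove dummy rootfs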

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/container.go                |  73 ++++++++++---------------
 lxd/container_lxc.go            |  98 ++++++++++++---------------------
 lxd/containers_post.go          | 116 ++++++++++++++++++++++------------------
 lxd/patches.go                  |  32 +++++------
 lxd/profiles_utils.go           |   7 +--
 test/suites/storage_profiles.sh |  19 +++----
 6 files changed, 148 insertions(+), 197 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 3b32f4e..6b15e5a 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -229,14 +229,28 @@ func isRootDiskDevice(device types.Device) bool {
 	return false
 }
 
-func containerGetRootDiskDevice(devices types.Devices) (string, types.Device) {
-	for devName, dev := range devices {
-		if isRootDiskDevice(dev) {
-			return devName, dev
+func containerGetRootDiskDevice(devices types.Devices) (string, types.Device, error) {
+	var devName string
+	var dev types.Device
+
+	count := 0
+	for n, d := range devices {
+		if isRootDiskDevice(d) {
+			count += 1
+			devName = n
+			dev = d
 		}
 	}
 
-	return "", types.Device{}
+	if count == 1 {
+		return devName, dev, nil
+	}
+
+	if count > 1 {
+		return "", types.Device{}, fmt.Errorf("More than one root device found.")
+	}
+
+	return "", types.Device{}, fmt.Errorf("No root device could be found.")
 }
 
 func containerValidDevices(devices types.Devices, profile bool, expanded bool) error {
@@ -330,9 +344,9 @@ func containerValidDevices(devices types.Devices, profile bool, expanded bool) e
 
 	// Checks on the expanded config
 	if expanded {
-		k, _ := containerGetRootDiskDevice(devices)
-		if k == "" {
-			return fmt.Errorf("Container is lacking rootfs entry")
+		_, _, err := containerGetRootDiskDevice(devices)
+		if err != nil {
+			return err
 		}
 	}
 
@@ -447,7 +461,6 @@ type container interface {
 
 	// FIXME: Those should be internal functions
 	// Needed for migration for now.
-	GetStoragePoolFromDevices() (string, error)
 	StorageStart() error
 	StorageStop() error
 	Storage() storage
@@ -681,37 +694,6 @@ func containerCreateInternal(d *Daemon, args containerArgs) (container, error) {
 		}
 	}
 
-	// Check that there are no contradicting root disk devices.
-	var profileRootDiskDevices []string
-	for _, pName := range args.Profiles {
-		_, p, err := dbProfileGet(d.db, pName)
-		if err != nil {
-			return nil, fmt.Errorf("Could not load profile '%s'.", pName)
-		}
-
-		k, v := containerGetRootDiskDevice(p.Devices)
-		if k != "" && v["pool"] == "" {
-			return nil, fmt.Errorf("A root disk device must have the \"pool\" property set.")
-		} else if k != "" && !shared.StringInSlice(k, profileRootDiskDevices) {
-			profileRootDiskDevices = append(profileRootDiskDevices, k)
-		}
-	}
-
-	k, newLocalRootDiskDevice := containerGetRootDiskDevice(args.Devices)
-	// Check whether container has a local root device with a "pool"
-	// property set.
-	if k != "" && newLocalRootDiskDevice["pool"] == "" {
-		return nil, fmt.Errorf("A root disk device must have the \"pool\" property set.")
-	} else if k == "" {
-		// Check whether the container's profiles provide a unique root
-		// device.
-		if len(profileRootDiskDevices) == 0 {
-			return nil, fmt.Errorf("Container relies on profile's root disk device but none was found")
-		} else if len(profileRootDiskDevices) > 1 {
-			return nil, fmt.Errorf("Container relies on profile's root disk device but conflicting devices were found")
-		}
-	}
-
 	// Create the container entry
 	id, err := dbContainerCreate(d.db, args)
 	if err != nil {
@@ -738,6 +720,7 @@ func containerCreateInternal(d *Daemon, args containerArgs) (container, error) {
 	args.CreationDate = dbArgs.CreationDate
 	args.LastUsedDate = dbArgs.LastUsedDate
 
+	// Setup the container struct and finish creation (storage and idmap)
 	c, err := containerLXCCreate(d, args)
 	if err != nil {
 		return nil, err
@@ -748,11 +731,9 @@ func containerCreateInternal(d *Daemon, args containerArgs) (container, error) {
 
 func containerConfigureInternal(c container) error {
 	// Find the root device
-	localDevices := c.LocalDevices()
-	_, rootDiskDevice := containerGetRootDiskDevice(localDevices)
-	if rootDiskDevice["pool"] == "" {
-		expandedDevices := c.ExpandedDevices()
-		_, rootDiskDevice = containerGetRootDiskDevice(expandedDevices)
+	_, rootDiskDevice, err := containerGetRootDiskDevice(c.ExpandedDevices())
+	if err != nil {
+		return err
 	}
 
 	if rootDiskDevice["size"] != "" {
@@ -768,7 +749,7 @@ func containerConfigureInternal(c container) error {
 		}
 	}
 
-	err := writeBackupFile(c)
+	err = writeBackupFile(c)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 1dd70d4..e0c1451 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -210,42 +210,42 @@ func containerLXCCreate(d *Daemon, args containerArgs) (container, error) {
 		return nil, err
 	}
 
-	// Retrieve the storage pool we've been given from the container's
-	// devices.
-	storagePool, err := c.GetStoragePoolFromDevices()
+	// Retrieve the container's storage pool
+	_, rootDiskDevice, err := containerGetRootDiskDevice(c.expandedDevices)
 	if err != nil {
+		c.Delete()
 		return nil, err
 	}
-	c.storagePool = storagePool
 
-	// Get the storage pool ID for the container.
-	poolID, pool, err := dbStoragePoolGet(d.db, c.storagePool)
-	if err != nil {
+	if rootDiskDevice["pool"] == "" {
 		c.Delete()
-		return nil, err
+		return nil, fmt.Errorf("The container's root device is missing the pool property.")
 	}
 
-	// Validate the requested storage volume configuration.
-	volumeConfig := map[string]string{}
-	err = storageVolumeValidateConfig(c.storagePool, volumeConfig, pool)
+	c.storagePool = rootDiskDevice["pool"]
+
+	// Get the storage pool ID for the container
+	poolID, pool, err := dbStoragePoolGet(d.db, c.storagePool)
 	if err != nil {
 		c.Delete()
 		return nil, err
 	}
 
+	// Fill in any default volume config
+	volumeConfig := map[string]string{}
 	err = storageVolumeFillDefault(c.storagePool, volumeConfig, pool)
 	if err != nil {
 		return nil, err
 	}
 
-	// Create a new database entry for the container's storage volume.
+	// Create a new database entry for the container's storage volume
 	_, err = dbStoragePoolVolumeCreate(d.db, args.Name, storagePoolVolumeTypeContainer, poolID, volumeConfig)
 	if err != nil {
 		c.Delete()
 		return nil, err
 	}
 
-	// Initialize the container storage.
+	// Initialize the container storage
 	cStorage, err := storagePoolVolumeContainerCreateInit(d, c.storagePool, args.Name)
 	if err != nil {
 		c.Delete()
@@ -254,21 +254,6 @@ func containerLXCCreate(d *Daemon, args containerArgs) (container, error) {
 	}
 	c.storage = cStorage
 
-	// Validate expanded config
-	err = containerValidConfig(d, c.expandedConfig, false, true)
-	if err != nil {
-		c.Delete()
-		shared.LogError("Failed creating container", ctxMap)
-		return nil, err
-	}
-
-	err = containerValidDevices(c.expandedDevices, false, true)
-	if err != nil {
-		c.Delete()
-		shared.LogError("Failed creating container", ctxMap)
-		return nil, err
-	}
-
 	// Setup initial idmap config
 	var idmap *shared.IdmapSet
 	base := 0
@@ -1386,22 +1371,6 @@ func (c *containerLXC) initLXC() error {
 	return nil
 }
 
-// Initialize the storage pool for this container: The devices of the container
-// are inspected for a suitable storage pool. This function is called by
-// containerLXCCreate() to detect a suitable storage pool.
-func (c *containerLXC) GetStoragePoolFromDevices() (string, error) {
-	_, rootDiskDevice := containerGetRootDiskDevice(c.localDevices)
-	if rootDiskDevice["pool"] == "" {
-		_, rootDiskDevice = containerGetRootDiskDevice(c.expandedDevices)
-	}
-	if rootDiskDevice["pool"] == "" {
-		return "", fmt.Errorf("No storage pool found in the containers devices.")
-	}
-
-	c.storagePool = rootDiskDevice["pool"]
-	return c.storagePool, nil
-}
-
 // This function is called on all non-create operations where an entry for the
 // container's storage volume will already exist in the database and so we can
 // retrieve it.
@@ -2713,16 +2682,15 @@ func (c *containerLXC) Delete() error {
 
 	shared.LogInfo("Deleting container", ctxMap)
 
-	// Initialize storage interface for the container.
-	err := c.initStorageInterface()
-	if err != nil {
-		return err
-	}
+	// Attempt to initialize storage interface for the container.
+	c.initStorageInterface()
 
 	if c.IsSnapshot() {
 		// Remove the snapshot
-		if err := c.storage.ContainerSnapshotDelete(c); err != nil {
-			shared.LogWarn("Failed to delete snapshot", log.Ctx{"name": c.Name(), "err": err})
+		if c.storage != nil {
+			if err := c.storage.ContainerSnapshotDelete(c); err != nil {
+				shared.LogWarn("Failed to delete snapshot", log.Ctx{"name": c.Name(), "err": err})
+			}
 		}
 	} else {
 		// Remove all snapshot
@@ -2734,7 +2702,7 @@ func (c *containerLXC) Delete() error {
 		c.cleanup()
 
 		// Delete the container from disk
-		if shared.PathExists(c.Path()) {
+		if shared.PathExists(c.Path()) && c.storage != nil {
 			if err := c.storage.ContainerDelete(c); err != nil {
 				shared.LogError("Failed deleting container storage", ctxMap)
 				return err
@@ -2743,19 +2711,23 @@ func (c *containerLXC) Delete() error {
 	}
 
 	// Remove the database record
-	if err = dbContainerRemove(c.daemon.db, c.Name()); err != nil {
+	if err := dbContainerRemove(c.daemon.db, c.Name()); err != nil {
 		shared.LogError("Failed deleting container entry", ctxMap)
 		return err
 	}
 
-	// Get the name of the storage pool the container is attached to. This
-	// reverse-engineering works because container names are globally
-	// unique.
-	poolID := c.storage.ContainerPoolIDGet()
-	// Remove volume from storage pool.
-	err = dbStoragePoolVolumeDelete(c.daemon.db, c.Name(), storagePoolVolumeTypeContainer, poolID)
-	if err != nil {
-		return err
+	// Remove the database entry for the pool device
+	if c.storage != nil {
+		// Get the name of the storage pool the container is attached to. This
+		// reverse-engineering works because container names are globally
+		// unique.
+		poolID := c.storage.ContainerPoolIDGet()
+
+		// Remove volume from storage pool.
+		err := dbStoragePoolVolumeDelete(c.daemon.db, c.Name(), storagePoolVolumeTypeContainer, poolID)
+		if err != nil {
+			return err
+		}
 	}
 
 	// Update network files
@@ -3249,7 +3221,7 @@ func (c *containerLXC) Update(args containerArgs, userRequested bool) error {
 	}
 
 	// Retrieve old root disk devices.
-	oldLocalRootDiskDeviceKey, oldLocalRootDiskDevice := containerGetRootDiskDevice(oldLocalDevices)
+	oldLocalRootDiskDeviceKey, oldLocalRootDiskDevice, _ := containerGetRootDiskDevice(oldLocalDevices)
 	var oldProfileRootDiskDevices []string
 	for k, v := range oldExpandedDevices {
 		if isRootDiskDevice(v) && k != oldLocalRootDiskDeviceKey && !shared.StringInSlice(k, oldProfileRootDiskDevices) {
@@ -3258,7 +3230,7 @@ func (c *containerLXC) Update(args containerArgs, userRequested bool) error {
 	}
 
 	// Retrieve new root disk devices.
-	newLocalRootDiskDeviceKey, newLocalRootDiskDevice := containerGetRootDiskDevice(c.localDevices)
+	newLocalRootDiskDeviceKey, newLocalRootDiskDevice, _ := containerGetRootDiskDevice(c.localDevices)
 	var newProfileRootDiskDevices []string
 	for k, v := range c.expandedDevices {
 		if isRootDiskDevice(v) && k != newLocalRootDiskDeviceKey && !shared.StringInSlice(k, newProfileRootDiskDevices) {
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 19ab406..689ccc7 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -167,15 +167,20 @@ func createFromNone(d *Daemon, req *api.ContainersPost) Response {
 }
 
 func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
+	// Validate migration mode
 	if req.Source.Mode != "pull" && req.Source.Mode != "push" {
 		return NotImplemented
 	}
 
+	var c container
+
+	// Parse the architecture name
 	architecture, err := osarch.ArchitectureId(req.Architecture)
 	if err != nil {
-		architecture = 0
+		return BadRequest(err)
 	}
 
+	// Prepare the container creation request
 	args := containerArgs{
 		Architecture: architecture,
 		BaseImage:    req.Source.BaseImage,
@@ -187,79 +192,63 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 		Profiles:     req.Profiles,
 	}
 
-	var c container
-	_, _, err = dbImageGet(d.db, req.Source.BaseImage, false, true)
+	// Grab the container's root device if one is specified
+	storagePool := ""
+	storagePoolProfile := ""
 
-	/* Only create a container from an image if we're going to
-	 * rsync over the top of it. In the case of a better file
-	 * transfer mechanism, let's just use that.
-	 *
-	 * TODO: we could invent some negotiation here, where if the
-	 * source and sink both have the same image, we can clone from
-	 * it, but we have to know before sending the snapshot that
-	 * we're sending the whole thing or just a delta from the
-	 * image, so one extra negotiation round trip is needed. An
-	 * alternative is to move actual container object to a later
-	 * point and just negotiate it over the migration control
-	 * socket. Anyway, it'll happen later :)
-	 */
+	localRootDiskDeviceKey, localRootDiskDevice, _ := containerGetRootDiskDevice(req.Devices)
+	if localRootDiskDeviceKey != "" {
+		storagePool = localRootDiskDevice["pool"]
+	}
 
-	localRootDiskDeviceKey, v := containerGetRootDiskDevice(req.Devices)
-	poolForCopyOrMove := ""
 	// Handle copying/moving between two storage-api LXD instances.
-	// FIXME(brauner): Atm, we do not let users target a specific pool to
-	// move to. So when we receive an invalid/non-existing storage pool, we
-	// simply set it to "" and perform the same pool-searching algorithm as
-	// in the non-storage-api to storage-api LXD instance case seen below.
-	// the container will receive on the remote.
-	if localRootDiskDeviceKey != "" && v["pool"] != "" {
-		_, err := dbStoragePoolGetID(d.db, v["pool"])
+	if storagePool != "" {
+		_, err := dbStoragePoolGetID(d.db, storagePool)
 		if err == NoSuchObjectError {
-			v["pool"] = ""
+			storagePool = ""
 		}
 	}
 
-	// Handle copying or moving containers between a non-storage-api and a
-	// storage-api LXD instance.
-	if localRootDiskDeviceKey == "" || localRootDiskDeviceKey != "" && v["pool"] == "" {
-		// Request came without a root disk device or without the pool
-		// property set. Try to retrieve this information from a
-		// profile. (No need to check for conflicting storage pool
-		// properties in the profiles. This will be handled by
-		// containerCreateInternal().)
+	// If we don't have a valid pool yet, look through profiles
+	if storagePool == "" {
 		for _, pName := range req.Profiles {
 			_, p, err := dbProfileGet(d.db, pName)
 			if err != nil {
 				return InternalError(err)
 			}
 
-			k, v := containerGetRootDiskDevice(p.Devices)
+			k, v, _ := containerGetRootDiskDevice(p.Devices)
 			if k != "" && v["pool"] != "" {
-				poolForCopyOrMove = v["pool"]
-				break
+				// Keep going as we want the last one in the profile chain
+				storagePool = v["pool"]
+				storagePoolProfile = pName
 			}
 		}
-		if poolForCopyOrMove == "" {
-			pools, err := dbStoragePools(d.db)
-			if err != nil {
-				return InternalError(err)
-			}
+	}
 
-			if len(pools) != 1 {
-				return InternalError(fmt.Errorf("No unique storage pool found."))
-			}
+	// If there is just a single pool in the database, use that
+	if storagePool == "" {
+		pools, err := dbStoragePools(d.db)
+		if err != nil {
+			return InternalError(err)
+		}
 
-			poolForCopyOrMove = pools[0]
+		if len(pools) == 1 {
+			storagePool = pools[0]
 		}
 	}
 
-	if localRootDiskDeviceKey == "" {
+	if storagePool == "" {
+		return BadRequest(fmt.Errorf("Can't find a storage pool for the container to use"))
+	}
+
+	if localRootDiskDeviceKey == "" && storagePoolProfile == "" {
 		// Give the container it's own local root disk device with a
 		// pool property.
 		rootDev := map[string]string{}
 		rootDev["type"] = "disk"
 		rootDev["path"] = "/"
-		rootDev["pool"] = poolForCopyOrMove
+		rootDev["pool"] = storagePool
 		if args.Devices == nil {
 			args.Devices = map[string]map[string]string{}
 		}
@@ -274,29 +263,50 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 			rootDevName = fmt.Sprintf("root%d", i)
 			continue
 		}
+
 		args.Devices["root"] = rootDev
-	} else if args.Devices[localRootDiskDeviceKey]["pool"] == "" {
-		// Give the container's root disk device a pool property.
-		args.Devices[localRootDiskDeviceKey]["pool"] = poolForCopyOrMove
+	} else if localRootDiskDeviceKey != "" && localRootDiskDevice["pool"] == "" {
+		args.Devices[localRootDiskDeviceKey]["pool"] = storagePool
 	}
 
+	/* Only create a container from an image if we're going to
+	 * rsync over the top of it. In the case of a better file
+	 * transfer mechanism, let's just use that.
+	 *
+	 * TODO: we could invent some negotiation here, where if the
+	 * source and sink both have the same image, we can clone from
+	 * it, but we have to know before sending the snapshot that
+	 * we're sending the whole thing or just a delta from the
+	 * image, so one extra negotiation round trip is needed. An
+	 * alternative is to move actual container object to a later
+	 * point and just negotiate it over the migration control
+	 * socket. Anyway, it'll happen later :)
+	 */
+	_, _, err = dbImageGet(d.db, req.Source.BaseImage, false, true)
 	if err != nil {
 		c, err = containerCreateAsEmpty(d, args)
 		if err != nil {
 			return InternalError(err)
 		}
 	} else {
+		// Retrieve the future storage pool
 		cM, err := containerLXCLoad(d, args)
 		if err != nil {
 			return InternalError(err)
 		}
 
-		_, err = cM.GetStoragePoolFromDevices()
+		_, rootDiskDevice, err := containerGetRootDiskDevice(cM.ExpandedDevices())
 		if err != nil {
 			return InternalError(err)
 		}
 
-		ps, err := storagePoolInit(d, cM.StoragePool())
+		if rootDiskDevice["pool"] == "" {
+			return BadRequest(fmt.Errorf("The container's root device is missing the pool property."))
+		}
+
+		storagePool = rootDiskDevice["pool"]
+
+		ps, err := storagePoolInit(d, storagePool)
 		if err != nil {
 			return InternalError(err)
 		}
diff --git a/lxd/patches.go b/lxd/patches.go
index 5592b8a..f8056cd 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -1227,11 +1227,12 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
 			}
 
 			// Check for a root disk device entry
-			k, _ := containerGetRootDiskDevice(p.Devices)
+			k, _, _ := containerGetRootDiskDevice(p.Devices)
 			if k != "" {
 				if p.Devices[k]["pool"] != "" {
 					continue
 				}
+
 				p.Devices[k]["pool"] = poolName
 			} else if k == "" && pName == "default" {
 				// The default profile should have a valid root
@@ -1295,9 +1296,7 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
 		}
 	}
 
-	// When no default profile is detected or some containers do not rely on
-	// the default profile for their root disk device, these containers will
-	// be given a valid local root disk device."
+	// Make sure all containers and snapshots have a valid disk configuration
 	for _, ct := range allcontainers {
 		c, err := containerLoadByName(d, ct)
 		if err != nil {
@@ -1320,26 +1319,17 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
 			args.Ctype = cTypeRegular
 		}
 
-		// Check expanded devices for a valid root entry. If it exists,
-		// we skip this container.
+		// Check if the container already has a valid root device entry (profile or previous upgrade)
 		expandedDevices := c.ExpandedDevices()
-		k, _ := containerGetRootDiskDevice(expandedDevices)
-		if k != "" && expandedDevices[k]["pool"] != "" {
-			// On partial upgrade the container might already have a
-			// valid root disk device entry.
-			if expandedDevices[k]["pool"] == poolName {
-				continue
-			}
+		k, d, _ := containerGetRootDiskDevice(expandedDevices)
+		if k != "" && d["pool"] != "" {
+			continue
 		}
 
-		// Check for a local root disk device entry and set the pool
-		// property.
+		// Look for a local root device entry
 		localDevices := c.LocalDevices()
-		k, _ = containerGetRootDiskDevice(localDevices)
+		k, d, _ = containerGetRootDiskDevice(localDevices)
 		if k != "" {
-			if localDevices[k]["pool"] != "" {
-				continue
-			}
 			localDevices[k]["pool"] = poolName
 			args.Devices = localDevices
 		} else {
@@ -1352,12 +1342,14 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
 			// is currently using under the name "root".
 			rootDevName := "root"
 			for i := 0; i < 100; i++ {
-				if localDevices[rootDevName] == nil {
+				if expandedDevices[rootDevName] == nil {
 					break
 				}
+
 				rootDevName = fmt.Sprintf("root%d", i)
 				continue
 			}
+
 			localDevices[rootDevName] = rootDev
 		}
 
diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index 503daf1..c432b63 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -22,17 +22,18 @@ func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req
 	containers := getContainersWithProfile(d, name)
 
 	// Check if the root device is supposed to be changed or removed.
-	oldProfileRootDiskDeviceKey, oldProfileRootDiskDevice := containerGetRootDiskDevice(profile.Devices)
-	_, newProfileRootDiskDevice := containerGetRootDiskDevice(req.Devices)
+	oldProfileRootDiskDeviceKey, oldProfileRootDiskDevice, _ := containerGetRootDiskDevice(profile.Devices)
+	_, newProfileRootDiskDevice, _ := containerGetRootDiskDevice(req.Devices)
 	if len(containers) > 0 &&
 		oldProfileRootDiskDevice["pool"] != "" &&
 		newProfileRootDiskDevice["pool"] == "" ||
 		(oldProfileRootDiskDevice["pool"] != newProfileRootDiskDevice["pool"]) {
+
 		// Check for containers using the device
 		for _, container := range containers {
 			// Check if the device is locally overridden
 			localDevices := container.LocalDevices()
-			k, v := containerGetRootDiskDevice(localDevices)
+			k, v, _ := containerGetRootDiskDevice(localDevices)
 			if k != "" && v["pool"] != "" {
 				continue
 			}
diff --git a/test/suites/storage_profiles.sh b/test/suites/storage_profiles.sh
index 37c2f8c..8e746af 100644
--- a/test/suites/storage_profiles.sh
+++ b/test/suites/storage_profiles.sh
@@ -46,7 +46,6 @@ test_storage_profiles() {
     for i in $(seq 1 3); do
       lxc launch testimage c"${i}" --profile dummy
     done
-    wait
 
     # Check that we can't remove or change the root disk device since containers
     # are actually using it.
@@ -55,7 +54,8 @@ test_storage_profiles() {
 
     # Give all the containers we started their own local root disk device.
     for i in $(seq 1 2); do
-      lxc config device add c"${i}" root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+      ! lxc config device add c"${i}" root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+      lxc config device add c"${i}" rootfs disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
     done
 
     # Try to set new pool. This should fail since there is a single container
@@ -68,7 +68,8 @@ test_storage_profiles() {
     ! lxc profile device remove dummy rootfs
 
     # Give the last container a local root disk device.
-    lxc config device add c3 root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+    ! lxc config device add c3 root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+    lxc config device add c3 rootfs disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
 
     # Try to set new pool. This should work since the container has a local disk
     lxc profile device set dummy rootfs pool "lxdtest-$(basename "${LXD_DIR}")-pool2"
@@ -78,7 +79,7 @@ test_storage_profiles() {
     lxc profile device remove dummy rootfs
 
     # Add back a root device to the profile.
-    lxc profile device add dummy rootfs1 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+    ! lxc profile device add dummy rootfs1 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
 
     # Try to add another root device to the profile that tries to set a pool
     # property. This should fail. This is also a test for whether it is possible
@@ -101,13 +102,8 @@ test_storage_profiles() {
     # contradicting root devices.
     ! lxc launch testimage cConflictingProfiles --p dummy -p dummyDup -p dummyNoDup
 
-    # Verify that we can create a container with profiles that have
-    # contradicting root devices if the container has a local root device set.
-    lxc launch testimage cConflictingProfiles2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" -p dummy -p dummyDup -p dummyNoDup 
-
-    # Verify that we cannot remove the local root disk device if the profiles
-    # have contradicting root disk devices.
-    ! lxc config device remove cConflictingProfiles2 root
+    # And that even with a local disk, a container can't have multiple root devices
+    ! lxc launch testimage cConflictingProfiles -s "lxdtest-$(basename "${LXD_DIR}")-pool2" -p dummy -p dummyDup -p dummyNoDup 
 
     # Check that we cannot assign conflicting profiles to a container that
     # relies on another profiles root disk device.
@@ -132,7 +128,6 @@ test_storage_profiles() {
       ! lxc profile assign c"${i}" dummyDup,dummyNoDup
     done
 
-    lxc delete -f cConflictingProfiles2
     lxc delete -f cNonConflictingProfiles
     lxc delete -f cOnDefault
     for i in $(seq 1 3); do

