[lxc-devel] [lxd/master] storage: verify root disk devices in profiles
brauner on Github
lxc-bot at linuxcontainers.org
Sat Feb 18 14:33:34 UTC 2017
From daf02e678e3a436ee0e6677e4cd2b5b634efcd65 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Fri, 17 Feb 2017 18:20:07 +0100
Subject: [PATCH 1/8] lxd/container: path may only be used by one disk
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/container.go | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/lxd/container.go b/lxd/container.go
index e4ef23c..91538ae 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -227,6 +227,7 @@ func containerValidDevices(devices types.Devices, profile bool, expanded bool) e
return nil
}
+ var diskDevicePaths []string
// Check each device individually
for name, m := range devices {
if m["type"] == "" {
@@ -256,6 +257,15 @@ func containerValidDevices(devices types.Devices, profile bool, expanded bool) e
return fmt.Errorf("Missing parent for %s type nic.", m["nictype"])
}
} else if m["type"] == "disk" {
+ if !expanded && !shared.StringInSlice(m["path"], diskDevicePaths) {
+ diskDevicePaths = append(diskDevicePaths, m["path"])
+ } else if !expanded && len(diskDevicePaths) > 0 {
+ // Only the root disk device should be able to
+ // appear in both the profiles and the local
+ // devices.
+ return fmt.Errorf("More than one disk device uses the same path: %s.", m["path"])
+ }
+
if m["path"] == "" {
return fmt.Errorf("Disk entry is missing the required \"path\" property.")
}
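For context, here is a minimal standalone sketch of the rule this hunk enforces, with an illustrative stringInSlice in place of the shared helper; the device names and the program around the check are made up, not part of the patch:

package main

import "fmt"

func stringInSlice(key string, list []string) bool {
    for _, entry := range list {
        if entry == key {
            return true
        }
    }
    return false
}

// checkDiskPaths rejects two disk devices that share a path, which is
// what containerValidDevices now does for unexpanded device lists.
func checkDiskPaths(devices map[string]map[string]string) error {
    var seen []string
    for _, m := range devices {
        if m["type"] != "disk" {
            continue
        }
        if stringInSlice(m["path"], seen) {
            return fmt.Errorf("More than one disk device uses the same path: %s.", m["path"])
        }
        seen = append(seen, m["path"])
    }
    return nil
}

func main() {
    devices := map[string]map[string]string{
        "root":  {"type": "disk", "path": "/"},
        "extra": {"type": "disk", "path": "/"},
    }
    fmt.Println(checkDiskPaths(devices)) // the duplicate "/" is rejected
}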
From 7a058f326bd7968aa661a62ee0f0e3abc0b4ea6f Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Thu, 16 Feb 2017 21:11:06 +0100
Subject: [PATCH 2/8] lxd/profiles: check for root disk device changes
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/profiles.go | 97 ---------------------------
lxd/profiles_utils.go | 181 ++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 181 insertions(+), 97 deletions(-)
create mode 100644 lxd/profiles_utils.go
diff --git a/lxd/profiles.go b/lxd/profiles.go
index a80e88b..eb64ef2 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -6,7 +6,6 @@ import (
"fmt"
"io/ioutil"
"net/http"
- "reflect"
"strings"
"github.com/gorilla/mux"
@@ -240,102 +239,6 @@ func profilePatch(d *Daemon, r *http.Request) Response {
return doProfileUpdate(d, name, id, profile, req)
}
-func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req api.ProfilePut) Response {
- // Sanity checks
- err := containerValidConfig(d, req.Config, true, false)
- if err != nil {
- return BadRequest(err)
- }
-
- err = containerValidDevices(req.Devices, true, false)
- if err != nil {
- return BadRequest(err)
- }
-
- // Get the container list
- containers := getContainersWithProfile(d, name)
-
- // Check that we only change the root disk device for profiles that do
- // not have any containers currently using it.
- for _, v := range req.Devices {
- if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && len(containers) > 0 {
- return BadRequest(fmt.Errorf("Cannot change root disk device of a profile if containers are still using it."))
- }
- }
-
- // Update the database
- tx, err := dbBegin(d.db)
- if err != nil {
- return InternalError(err)
- }
-
- if profile.Description != req.Description {
- err = dbProfileDescriptionUpdate(tx, id, req.Description)
- if err != nil {
- tx.Rollback()
- return InternalError(err)
- }
- }
-
- // Optimize for description-only changes
- if reflect.DeepEqual(profile.Config, req.Config) && reflect.DeepEqual(profile.Devices, req.Devices) {
- err = txCommit(tx)
- if err != nil {
- return InternalError(err)
- }
-
- return EmptySyncResponse
- }
-
- err = dbProfileConfigClear(tx, id)
- if err != nil {
- tx.Rollback()
- return InternalError(err)
- }
-
- err = dbProfileConfigAdd(tx, id, req.Config)
- if err != nil {
- tx.Rollback()
- return SmartError(err)
- }
-
- err = dbDevicesAdd(tx, "profile", id, req.Devices)
- if err != nil {
- tx.Rollback()
- return SmartError(err)
- }
-
- err = txCommit(tx)
- if err != nil {
- return InternalError(err)
- }
-
- // Update all the containers using the profile. Must be done after txCommit due to DB lock.
- failures := map[string]error{}
- for _, c := range containers {
- err = c.Update(containerArgs{
- Architecture: c.Architecture(),
- Ephemeral: c.IsEphemeral(),
- Config: c.LocalConfig(),
- Devices: c.LocalDevices(),
- Profiles: c.Profiles()}, true)
-
- if err != nil {
- failures[c.Name()] = err
- }
- }
-
- if len(failures) != 0 {
- msg := "The following containers failed to update (profile change still saved):\n"
- for cname, err := range failures {
- msg += fmt.Sprintf(" - %s: %s\n", cname, err)
- }
- return InternalError(fmt.Errorf("%s", msg))
- }
-
- return EmptySyncResponse
-}
-
// The handler for the post operation.
func profilePost(d *Daemon, r *http.Request) Response {
name := mux.Vars(r)["name"]
diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
new file mode 100644
index 0000000..e62bb97
--- /dev/null
+++ b/lxd/profiles_utils.go
@@ -0,0 +1,181 @@
+package main
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/lxc/lxd/lxd/types"
+ "github.com/lxc/lxd/shared/api"
+)
+
+func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req api.ProfilePut) Response {
+ // Sanity checks
+ err := containerValidConfig(d, req.Config, true, false)
+ if err != nil {
+ return BadRequest(err)
+ }
+
+ err = containerValidDevices(req.Devices, true, false)
+ if err != nil {
+ return BadRequest(err)
+ }
+
+ containers := getContainersWithProfile(d, name)
+
+ // Check if the root device is supposed to be changed or removed.
+ var oldProfileRootDevice types.Device
+ for _, v := range profile.Devices {
+ // Detect whether the old profile has a root device.
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" {
+ oldProfileRootDevice = v
+ break
+ }
+ }
+
+ var newProfileRootDevice types.Device
+ for _, v := range req.Devices {
+ // Detect whether the new profile has a root device.
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" {
+ newProfileRootDevice = v
+ break
+ }
+ }
+
+ // Interesting cases:
+ // - root device removed: old profile provides a root device and the new
+ // profile provides none
+ // - root device changed: old profile provides a root device and the
+ // new profile changes some properties
+ if (oldProfileRootDevice != nil && newProfileRootDevice == nil) || (oldProfileRootDevice != nil && newProfileRootDevice != nil && !reflect.DeepEqual(oldProfileRootDevice, newProfileRootDevice)) {
+ // Iterate through all containers using that profile and remove
+ // all containers that have a local root device.
+ for i, c := range containers {
+ for _, cDevice := range c.LocalDevices() {
+ if cDevice["type"] == "disk" && cDevice["path"] == "/" && cDevice["source"] == "" {
+ copy(containers[i:], containers[i+1:])
+ containers[len(containers)-1] = nil
+ containers = containers[:len(containers)-1]
+ break
+ }
+ }
+ }
+
+ // The root device is supposed to be removed: Now look for each
+ // container that is using the profile's root device if they all
+ // have at least another profile that provides the same device
+ // before we allow to change it.
+ if (len(containers) > 0) && (newProfileRootDevice == nil) {
+ for i, c := range containers {
+ for _, p := range c.Profiles() {
+ // Search through all of the containers profiles.
+ if p == profile.Name {
+ continue
+ }
+
+ _, pInfo, err := dbProfileGet(d.db, profile.Name)
+ if err != nil {
+ continue
+ }
+
+ for _, pDev := range pInfo.Devices {
+ // There's another profile that provides a root device on the same pool.
+ // So we can delete the container from the list.
+ if pDev["type"] == "disk" && pDev["path"] == "/" && pDev["source"] == "" {
+ if pDev["pool"] == oldProfileRootDevice["pool"] {
+ copy(containers[i:], containers[i+1:])
+ containers[len(containers)-1] = nil
+ containers = containers[:len(containers)-1]
+ break
+ }
+ }
+ }
+ }
+ }
+ if len(containers) > 0 {
+ return BadRequest(fmt.Errorf("Device is in use by container."))
+ }
+ } else if (len(containers) > 0) && (oldProfileRootDevice["pool"] != "") {
+ // In this case we don't need to check whether the
+ // container gets its rootfs from another profile since
+ // changing the pool here would cause the container to
+ // belong to profiles that demand that the container
+ // belongs to different pools which is inconsistent. So
+ // simply refuse in that case.
+ if newProfileRootDevice["pool"] == "" || (oldProfileRootDevice["pool"] != newProfileRootDevice["pool"]) {
+ return BadRequest(fmt.Errorf("Device is in use by container."))
+ }
+ }
+ }
+
+ // Update the database
+ tx, err := dbBegin(d.db)
+ if err != nil {
+ return InternalError(err)
+ }
+
+ if profile.Description != req.Description {
+ err = dbProfileDescriptionUpdate(tx, id, req.Description)
+ if err != nil {
+ tx.Rollback()
+ return InternalError(err)
+ }
+ }
+
+ // Optimize for description-only changes
+ if reflect.DeepEqual(profile.Config, req.Config) && reflect.DeepEqual(profile.Devices, req.Devices) {
+ err = txCommit(tx)
+ if err != nil {
+ return InternalError(err)
+ }
+
+ return EmptySyncResponse
+ }
+
+ err = dbProfileConfigClear(tx, id)
+ if err != nil {
+ tx.Rollback()
+ return InternalError(err)
+ }
+
+ err = dbProfileConfigAdd(tx, id, req.Config)
+ if err != nil {
+ tx.Rollback()
+ return SmartError(err)
+ }
+
+ err = dbDevicesAdd(tx, "profile", id, req.Devices)
+ if err != nil {
+ tx.Rollback()
+ return SmartError(err)
+ }
+
+ err = txCommit(tx)
+ if err != nil {
+ return InternalError(err)
+ }
+
+ // Update all the containers using the profile. Must be done after txCommit due to DB lock.
+ failures := map[string]error{}
+ for _, c := range containers {
+ err = c.Update(containerArgs{
+ Architecture: c.Architecture(),
+ Ephemeral: c.IsEphemeral(),
+ Config: c.LocalConfig(),
+ Devices: c.LocalDevices(),
+ Profiles: c.Profiles()}, true)
+
+ if err != nil {
+ failures[c.Name()] = err
+ }
+ }
+
+ if len(failures) != 0 {
+ msg := "The following containers failed to update (profile change still saved):\n"
+ for cname, err := range failures {
+ msg += fmt.Sprintf(" - %s: %s\n", cname, err)
+ }
+ return InternalError(fmt.Errorf("%s", msg))
+ }
+
+ return EmptySyncResponse
+}
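This patch, and every later one in the series, re-derives the same predicate for "this is a profile-provided root disk device". As a reading aid, the check boils down to the following hypothetical helper (not part of the patch):

package main

import "fmt"

// isRootDiskDevice matches the recurring check in this series: a disk
// device mounted at "/" with no explicit source.
func isRootDiskDevice(m map[string]string) bool {
    return m["type"] == "disk" && m["path"] == "/" && m["source"] == ""
}

func main() {
    dev := map[string]string{"type": "disk", "path": "/", "pool": "default"}
    fmt.Println(isRootDiskDevice(dev)) // true
}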
From d9b6c3f150946394a5e19b03355b3c20c8daeec2 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sat, 18 Feb 2017 13:13:19 +0100
Subject: [PATCH 3/8] lxd/container_lxc: ensure proper root disk device
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/container_lxc.go | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 8a62d9f..9945e8b 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -373,6 +373,7 @@ type containerLXC struct {
fromHook bool
localConfig map[string]string
localDevices types.Devices
+ profileDevices types.Devices
profiles []string
// Cache
@@ -1426,10 +1427,8 @@ func (c *containerLXC) expandConfig() error {
func (c *containerLXC) expandDevices() error {
devices := types.Devices{}
- rootDevices := 0
- profileStoragePool := ""
-
// Apply all the profiles
+ var profileRootDiskDevices []string
for _, p := range c.profiles {
profileDevices, err := dbDevices(c.daemon.db, p, true)
if err != nil {
@@ -1440,11 +1439,11 @@ func (c *containerLXC) expandDevices() error {
for k, v := range profileDevices {
devices[k] = v
- if v["type"] == "disk" && v["path"] == "/" {
- rootDevices++
- profileStoragePool = v["pool"]
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ if !shared.StringInSlice(v["pool"], profileRootDiskDevices) {
+ profileRootDiskDevices = append(profileRootDiskDevices, v["pool"])
+ }
}
-
}
}
@@ -1457,15 +1456,19 @@ func (c *containerLXC) expandDevices() error {
}
}
- if rootDevices > 1 {
+ // If the container relies on its profiles for the root disk device,
+ // the profiles must agree on a single pool.
+ if len(profileRootDiskDevices) > 1 && c.storagePool == "" {
return fmt.Errorf("Failed to detect unique root device: Multiple root devices detected.")
}
if c.storagePool == "" {
- if profileStoragePool == "" {
+ if profileRootDiskDevices != nil {
+ c.storagePool = profileRootDiskDevices[0]
+ }
+ if c.storagePool == "" {
return fmt.Errorf("No storage pool specified.")
}
- c.storagePool = profileStoragePool
}
c.expandedDevices = devices
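The precedence that expandDevices now implements can be condensed as follows. This is a sketch under the assumption that profilePools is the deduplicated pool list collected above; it is not code from the patch:

package main

import "fmt"

// resolveStoragePool: an explicit local pool always wins; otherwise the
// profiles must agree on exactly one pool between them.
func resolveStoragePool(local string, profilePools []string) (string, error) {
    if local != "" {
        return local, nil
    }
    if len(profilePools) > 1 {
        return "", fmt.Errorf("Failed to detect unique root device: Multiple root devices detected.")
    }
    if len(profilePools) == 1 {
        return profilePools[0], nil
    }
    return "", fmt.Errorf("No storage pool specified.")
}

func main() {
    pool, err := resolveStoragePool("", []string{"pool1"})
    fmt.Println(pool, err) // "pool1" <nil>
}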
From 759f531d163085008b9d84012a93d23d814c09e8 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sat, 18 Feb 2017 13:14:14 +0100
Subject: [PATCH 4/8] lxd/container_put: verify root disk device changes
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/container_put.go | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 78 insertions(+)
diff --git a/lxd/container_put.go b/lxd/container_put.go
index 135a41b..1323ebd 100644
--- a/lxd/container_put.go
+++ b/lxd/container_put.go
@@ -8,6 +8,7 @@ import (
"github.com/gorilla/mux"
+ "github.com/lxc/lxd/lxd/types"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/osarch"
@@ -39,6 +40,83 @@ func containerPut(d *Daemon, r *http.Request) Response {
return BadRequest(err)
}
+ // Check whether the old local devices have a root disk device.
+ var oldLocalRootDevice types.Device
+ for _, v := range c.LocalDevices() {
+ // Detect whether the old local devices provide a root disk device.
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ oldLocalRootDevice = v
+ }
+ }
+
+ // Check whether the new local devices have a root disk device.
+ var newLocalRootDevice types.Device
+ for _, v := range configRaw.Devices {
+ // Detect whether the new local devices provide a root disk device.
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ newLocalRootDevice = v
+ }
+ }
+
+ var profileRootDiskDevices []string
+ // Check whether the old profiles have a root disk device.
+ for _, pName := range c.Profiles() {
+ _, p, err := dbProfileGet(d.db, pName)
+ if err != nil {
+ continue
+ }
+
+ for _, v := range p.Devices {
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ if !shared.StringInSlice(v["pool"], profileRootDiskDevices) {
+ profileRootDiskDevices = append(profileRootDiskDevices, v["pool"])
+ }
+ break
+ }
+ }
+ }
+
+ // Check whether the new profiles have a root disk device.
+ for _, pName := range configRaw.Profiles {
+ _, p, err := dbProfileGet(d.db, pName)
+ if err != nil {
+ continue
+ }
+
+ for _, v := range p.Devices {
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ if !shared.StringInSlice(v["pool"], profileRootDiskDevices) {
+ profileRootDiskDevices = append(profileRootDiskDevices, v["pool"])
+ }
+ break
+ }
+ }
+ }
+
+ // We know there are two different root disk devices specified in the
+ // profiles the container is supposed to be attached to. We
+ // shouldn't allow that to happen as this would be nasty to deal with
+ // when the container removes its root disk device.
+ if len(profileRootDiskDevices) > 1 && newLocalRootDevice == nil || (len(profileRootDiskDevices) > 1 && newLocalRootDevice["pool"] == "") {
+ return BadRequest(fmt.Errorf("Profiles specifiy conflicting pools for the containers root disk device."))
+ }
+
+ // The old local devices provided a root disk device and the new ones
+ // don't. So check whether the profiles provide a unique one.
+ if oldLocalRootDevice != nil && newLocalRootDevice == nil {
+ if len(profileRootDiskDevices) == 0 {
+ return BadRequest(fmt.Errorf("The change would leave the container without a root disk device."))
+ } else if profileRootDiskDevices[0] != oldLocalRootDevice["pool"] {
+ return BadRequest(fmt.Errorf("The update would change the containers' storage pool."))
+ }
+ } else if newLocalRootDevice["pool"] != "" {
+ if oldLocalRootDevice["pool"] != "" && oldLocalRootDevice["pool"] != newLocalRootDevice["pool"] {
+ return BadRequest(fmt.Errorf("The update would change the containers' storage pool."))
+ } else if oldLocalRootDevice["pool"] == "" && len(profileRootDiskDevices) > 0 && newLocalRootDevice["pool"] != profileRootDiskDevices[0] {
+ return BadRequest(fmt.Errorf("The update would change the container's storage pool."))
+ }
+ }
+
architecture, err := osarch.ArchitectureId(configRaw.Architecture)
if err != nil {
architecture = 0
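The rules this hunk adds to PUT can be summarized in one hypothetical function. haveOld/haveNew stand in for the nil checks on the device maps, and the last branch of the original (matching a new local pool against the profiles) is omitted for brevity; this is a sketch, not the patch's code:

package main

import "fmt"

func validateRootDeviceUpdate(oldPool, newPool string, haveOld, haveNew bool, profilePools []string) error {
    // Conflicting profile pools require a local root disk device that
    // pins the container to one pool.
    if len(profilePools) > 1 && (!haveNew || newPool == "") {
        return fmt.Errorf("Profiles specify conflicting pools for the container's root disk device.")
    }

    // Dropping the local root disk device is only allowed if the
    // profiles provide one on the same pool.
    if haveOld && !haveNew {
        if len(profilePools) == 0 {
            return fmt.Errorf("The change would leave the container without a root disk device.")
        }
        if profilePools[0] != oldPool {
            return fmt.Errorf("The update would change the container's storage pool.")
        }
        return nil
    }

    // Changing the pool of an existing local root disk device is refused.
    if haveNew && newPool != "" && haveOld && oldPool != "" && oldPool != newPool {
        return fmt.Errorf("The update would change the container's storage pool.")
    }
    return nil
}

func main() {
    fmt.Println(validateRootDeviceUpdate("pool1", "pool2", true, true, nil))
}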
From d11a6b3639d9d2b49f260a86375fd2e16acedd46 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sat, 18 Feb 2017 15:28:57 +0100
Subject: [PATCH 5/8] lxd/containers_post: verify root disk devices
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/containers_post.go | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index e21eca0..37ced73 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -432,6 +432,36 @@ func containersPost(d *Daemon, r *http.Request) Response {
return BadRequest(fmt.Errorf("Invalid container name: '%s' is reserved for snapshots", shared.SnapshotDelimiter))
}
+ // Check that there are no contradicting root disk devices.
+ var profileRootDiskDevices []string
+ for _, pName := range req.Profiles {
+ _, p, err := dbProfileGet(d.db, pName)
+ if err != nil {
+ continue
+ }
+
+ for _, v := range p.Devices {
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ if !shared.StringInSlice(v["pool"], profileRootDiskDevices) {
+ profileRootDiskDevices = append(profileRootDiskDevices, v["pool"])
+ }
+ break
+ }
+ }
+ }
+
+ var newLocalRootDevice types.Device
+ for _, v := range req.Devices {
+ // Detect whether the request's devices provide a root disk device.
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ newLocalRootDevice = v
+ }
+ }
+
+ if len(profileRootDiskDevices) > 1 && newLocalRootDevice == nil || (len(profileRootDiskDevices) > 1 && newLocalRootDevice["pool"] == "") {
+ return BadRequest(fmt.Errorf("Profiles specify contradicting storage pools the container belongs to."))
+ }
+
switch req.Source.Type {
case "image":
return createFromImage(d, &req)
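Patches 4 and 5 share the same inline loop for collecting the pools named by profile root disk devices. Factored out as a hypothetical helper (the [][]map input is a stand-in for the device maps dbProfileGet returns; none of this is in the patch):

package main

import "fmt"

func collectRootDiskPools(profiles [][]map[string]string) []string {
    var pools []string
    for _, devices := range profiles {
        for _, v := range devices {
            if v["type"] != "disk" || v["path"] != "/" || v["source"] != "" || v["pool"] == "" {
                continue
            }
            // Deduplicate: only record a pool we haven't seen yet.
            seen := false
            for _, p := range pools {
                if p == v["pool"] {
                    seen = true
                    break
                }
            }
            if !seen {
                pools = append(pools, v["pool"])
            }
            break // at most one root disk device per profile
        }
    }
    return pools
}

func main() {
    profiles := [][]map[string]string{
        {{"type": "disk", "path": "/", "pool": "pool1"}},
        {{"type": "disk", "path": "/", "pool": "pool2"}},
    }
    fmt.Println(collectRootDiskPools(profiles)) // [pool1 pool2]: a conflict
}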
From f8597907930bc41069e35c520d3acc26f3ba3159 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sat, 18 Feb 2017 15:29:42 +0100
Subject: [PATCH 6/8] lxd/profiles_put: verify root disk devices
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/profiles_utils.go | 85 +++++++++++++++++++++++----------------------------
1 file changed, 38 insertions(+), 47 deletions(-)
diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index e62bb97..deba448 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -23,86 +23,77 @@ func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req
containers := getContainersWithProfile(d, name)
// Check if the root device is supposed to be changed or removed.
- var oldProfileRootDevice types.Device
+ var oldProfileRootDiskDevice types.Device
for _, v := range profile.Devices {
// Detect whether the old profile has a root device.
if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" {
- oldProfileRootDevice = v
+ oldProfileRootDiskDevice = v
break
}
}
- var newProfileRootDevice types.Device
+ var newProfileRootDiskDevice types.Device
for _, v := range req.Devices {
// Detect whether the new profile has a root device.
if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" {
- newProfileRootDevice = v
+ newProfileRootDiskDevice = v
break
}
}
- // Interesting cases:
- // - root device removed: old profile provides a root device and the new
- // profile provides none
- // - root device changed: old profile provides a root device and the
- // new profile changes some properties
- if (oldProfileRootDevice != nil && newProfileRootDevice == nil) || (oldProfileRootDevice != nil && newProfileRootDevice != nil && !reflect.DeepEqual(oldProfileRootDevice, newProfileRootDevice)) {
- // Iterate through all containers using that profile and remove
- // all containers that have a local root device.
- for i, c := range containers {
- for _, cDevice := range c.LocalDevices() {
- if cDevice["type"] == "disk" && cDevice["path"] == "/" && cDevice["source"] == "" {
- copy(containers[i:], containers[i+1:])
- containers[len(containers)-1] = nil
- containers = containers[:len(containers)-1]
- break
+ if len(containers) > 0 && oldProfileRootDiskDevice != nil && oldProfileRootDiskDevice["pool"] != "" {
+ if newProfileRootDiskDevice == nil || newProfileRootDiskDevice["pool"] == "" || (oldProfileRootDiskDevice["pool"] != newProfileRootDiskDevice["pool"]) {
+ // Check for any containers that have a local root disk device.
+ var remainingContainers []container
+ for _, c := range containers {
+ foundLocalRootDevice := false
+ for _, v := range c.LocalDevices() {
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ foundLocalRootDevice = true
+ break
+ }
+ }
+
+ if !foundLocalRootDevice {
+ remainingContainers = append(remainingContainers, c)
}
}
- }
+ containers = remainingContainers
+ remainingContainers = nil
- // The root device is supposed to be removed: Now look for each
- // container that is using the profile's root device if they all
- // have at least another profile that provides the same device
- // before we allow to change it.
- if (len(containers) > 0) && (newProfileRootDevice == nil) {
- for i, c := range containers {
+ // Check if another profile provides the same root disk device.
+ for _, c := range containers {
+ foundSecondProfileRootDevice := false
for _, p := range c.Profiles() {
- // Search through all of the containers profiles.
+ // Search through all of the container's
+ // profiles. Only skip the one we
+ // are about to change.
if p == profile.Name {
continue
}
- _, pInfo, err := dbProfileGet(d.db, profile.Name)
+ _, pInfo, err := dbProfileGet(d.db, p)
if err != nil {
continue
}
- for _, pDev := range pInfo.Devices {
+ for _, v := range pInfo.Devices {
// There's another profile that provides a root device on the same pool.
// So we can delete the container from the list.
- if pDev["type"] == "disk" && pDev["path"] == "/" && pDev["source"] == "" {
- if pDev["pool"] == oldProfileRootDevice["pool"] {
- copy(containers[i:], containers[i+1:])
- containers[len(containers)-1] = nil
- containers = containers[:len(containers)-1]
- break
- }
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && oldProfileRootDiskDevice["pool"] == v["pool"] {
+ foundSecondProfileRootDevice = true
+ break
}
}
}
+ if !foundSecondProfileRootDevice {
+ remainingContainers = append(remainingContainers, c)
+ }
}
+ containers = remainingContainers
+ remainingContainers = nil
if len(containers) > 0 {
- return BadRequest(fmt.Errorf("Device is in use by container."))
- }
- } else if (len(containers) > 0) && (oldProfileRootDevice["pool"] != "") {
- // In this case we don't need to check whether the
- // container gets its rootfs from another profile since
- // changing the pool here would cause the container to
- // belong to profiles that demand that the container
- // belongs to different pools which is inconsistent. So
- // simply refuse in that case.
- if newProfileRootDevice["pool"] == "" || (oldProfileRootDevice["pool"] != newProfileRootDevice["pool"]) {
- return BadRequest(fmt.Errorf("Device is in use by container."))
+ return BadRequest(fmt.Errorf("At least one container relies on this profiles' root disk device."))
}
}
}
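A side note on the restructuring above: the first version deleted from the containers slice while ranging over it, which shifts later elements under the live index and can skip them. Rebuilding a remainingContainers slice, as this patch does, avoids that. The idiom in isolation, with illustrative names:

package main

import "fmt"

// filterNames keeps only the elements the predicate accepts, instead of
// deleting from the slice mid-iteration.
func filterNames(names []string, keep func(string) bool) []string {
    var remaining []string
    for _, n := range names {
        if keep(n) {
            remaining = append(remaining, n)
        }
    }
    return remaining
}

func main() {
    names := []string{"c1", "c2", "c3"}
    fmt.Println(filterNames(names, func(n string) bool { return n != "c2" }))
    // Output: [c1 c3]
}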
From d143eea329e0c43818a16986fc931ba82c7d35f8 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sat, 18 Feb 2017 15:30:02 +0100
Subject: [PATCH 7/8] lxd/container_lxc: verify root disk devices
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/container_lxc.go | 75 ++++++++++++++++++++++++++++++----------------------
1 file changed, 44 insertions(+), 31 deletions(-)
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 9945e8b..420923b 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -3227,54 +3227,67 @@ func (c *containerLXC) Update(args containerArgs, userRequested bool) error {
c.localConfig["volatile.idmap.base"] = fmt.Sprintf("%v", base)
}
- // Apply disk quota changes
- for _, m := range addDevices {
- var oldRootfsSize string
- for _, m := range oldExpandedDevices {
- if m["type"] == "disk" && m["path"] == "/" {
- oldRootfsSize = m["size"]
- break
- }
+ // Check whether the old local devices have a root disk device.
+ var oldLocalRootDevice types.Device
+ oldRootfsSize := ""
+ for _, v := range oldLocalDevices {
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ oldLocalRootDevice = v
+ oldRootfsSize = v["size"]
+ break
}
+ }
- if m["size"] != oldRootfsSize {
- size, err := shared.ParseByteSizeString(m["size"])
- if err != nil {
- return err
- }
-
- err = c.storage.ContainerSetQuota(c, size)
- if err != nil {
- return err
+ if oldLocalRootDevice == nil {
+ for _, v := range oldExpandedDevices {
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ oldLocalRootDevice = v
+ oldRootfsSize = v["size"]
+ break
}
}
}
- // Confirm that the storage pool didn't change.
- var oldRootfs types.Device
- for _, m := range oldExpandedDevices {
- if m["type"] == "disk" && m["path"] == "/" {
- oldRootfs = m
+ // Check whether the new local devices have a root disk device.
+ var newLocalRootDevice types.Device
+ newRootfsSize := ""
+ for _, v := range c.localDevices {
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ newLocalRootDevice = v
+ newRootfsSize = v["size"]
break
}
}
- var newRootfs types.Device
- for _, name := range c.expandedDevices.DeviceNames() {
- m := c.expandedDevices[name]
- if m["type"] == "disk" && m["path"] == "/" {
- newRootfs = m
- break
+ if newLocalRootDevice == nil {
+ for _, v := range c.expandedDevices {
+ if v["type"] == "disk" && v["path"] == "/" && v["source"] == "" && v["pool"] != "" {
+ newLocalRootDevice = v
+ newRootfsSize = v["size"]
+ break
+ }
}
}
- if oldRootfs["pool"] != "" && (oldRootfs["pool"] != newRootfs["pool"]) {
- return fmt.Errorf("Changing the storage pool of a container is not yet implemented.")
+ if newLocalRootDevice == nil || newLocalRootDevice["pool"] == "" {
+ return fmt.Errorf("No root disk device found.")
+ }
+
+ if newRootfsSize != oldRootfsSize {
+ size, err := shared.ParseByteSizeString(newRootfsSize)
+ if err != nil {
+ return err
+ }
+
+ err = c.storage.ContainerSetQuota(c, size)
+ if err != nil {
+ return err
+ }
}
// Apply the live changes
if c.IsRunning() {
- if oldRootfs["source"] != newRootfs["source"] {
+ if oldLocalRootDevice["source"] != oldLocalRootDevice["source"] {
return fmt.Errorf("Cannot change the rootfs path of a running container")
}
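The quota path now compares the effective old and new root disk sizes once, instead of once per added device. A condensed sketch, using shared.ParseByteSizeString from the lxd tree (as the hunk itself does) and a callback in place of the storage layer; the function and names are illustrative:

package main

import (
    "fmt"

    "github.com/lxc/lxd/shared"
)

// maybeApplyQuota only touches the quota when the size actually changed.
func maybeApplyQuota(oldSize, newSize string, setQuota func(int64) error) error {
    if newSize == oldSize {
        return nil
    }
    size, err := shared.ParseByteSizeString(newSize)
    if err != nil {
        return err
    }
    return setQuota(size)
}

func main() {
    err := maybeApplyQuota("", "10GB", func(bytes int64) error {
        fmt.Printf("setting quota to %d bytes\n", bytes)
        return nil
    })
    fmt.Println(err)
}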
From 445aba8aaa51846f5d920391df64019a3b477dd7 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sat, 18 Feb 2017 15:30:35 +0100
Subject: [PATCH 8/8] test: add test for root disk devices in profiles
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
test/main.sh | 1 +
test/suites/storage_profiles.sh | 129 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 130 insertions(+)
create mode 100644 test/suites/storage_profiles.sh
diff --git a/test/main.sh b/test/main.sh
index be3c592..77ab9e3 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -587,5 +587,6 @@ run_test test_cpu_profiling "CPU profiling"
run_test test_mem_profiling "memory profiling"
run_test test_storage "storage"
run_test test_lxd_autoinit "lxd init auto"
+run_test test_storage_profiles "storage profiles"
TEST_RESULT=success
diff --git a/test/suites/storage_profiles.sh b/test/suites/storage_profiles.sh
new file mode 100644
index 0000000..f8c20cd
--- /dev/null
+++ b/test/suites/storage_profiles.sh
@@ -0,0 +1,129 @@
+#!/bin/sh
+
+test_storage_profiles() {
+ # shellcheck disable=2039
+
+ LXD_STORAGE_DIR=$(mktemp -d -p "${TEST_DIR}" XXXXXXXXX)
+ chmod +x "${LXD_STORAGE_DIR}"
+ spawn_lxd "${LXD_STORAGE_DIR}" false
+ (
+ set -e
+ # shellcheck disable=2030
+ LXD_DIR="${LXD_STORAGE_DIR}"
+
+ HAS_ZFS="dir"
+ if which zfs >/dev/null 2>&1; then
+ HAS_ZFS="zfs"
+ fi
+
+ HAS_BTRFS="dir"
+ if which btrfs >/dev/null 2>&1; then
+ HAS_BTRFS="btrfs"
+ fi
+
+ # shellcheck disable=SC1009
+ # Create loop file zfs pool.
+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" "${HAS_ZFS}"
+
+ # Create loop file btrfs pool.
+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" "${HAS_BTRFS}"
+
+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool4" dir
+
+ # Set default storage pool for image import.
+ lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+
+ # Import image into default storage pool.
+ ensure_import_testimage
+
+ lxc profile create dummy
+
+ # Create a new profile that provides a root device for some containers.
+ lxc profile device add dummy rootfs disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+
+ # Begin interesting test cases.
+
+ for i in $(seq 1 3); do
+ lxc launch testimage c"${i}" --profile dummy
+ done
+ wait
+
+ # Check that we can't remove or change the root disk device since containers
+ # are actually using it.
+ ! lxc profile device remove dummy rootfs
+ ! lxc profile device set dummy rootfs pool "lxdtest-$(basename "${LXD_DIR}")-pool2"
+
+ # Give all the containers we started their own local root disk device.
+ for i in $(seq 1 3); do
+ lxc config device add c"${i}" root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+ done
+
+ # Try to set a new pool. This should work since the containers now have local root disk devices.
+ lxc profile device set dummy rootfs pool "lxdtest-$(basename "${LXD_DIR}")-pool2"
+ lxc profile device set dummy rootfs pool "lxdtest-$(basename "${LXD_DIR}")-pool1"
+ # Check that we can now remove the root disk device since no container is
+ # actually using it.
+ lxc profile device remove dummy rootfs
+
+ # Add back a root device to the profile.
+ lxc profile device add dummy rootfs1 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+
+ # Try to add another root device to the profile that tries to set a pool
+ # property. This should fail. This is also a test for whether it is possible
+ # to put multiple disk devices on the same path. This must fail!
+ ! lxc profile device add dummy rootfs2 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool2"
+
+ # Add another root device to the profile that does not set a pool property.
+ # This should not work since it would use the same path.
+ ! lxc profile device add dummy rootfs3 disk path="/"
+
+ # Create a second profile.
+ lxc profile create dummyDup
+ lxc profile device add dummyDup rootfs1 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool1"
+
+ # Create a third profile
+ lxc profile create dummyNoDup
+ lxc profile device add dummyNoDup rootfs2 disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool2"
+
+ # Verify that we cannot create a container with profiles that have
+ # contradicting root devices.
+ ! lxc launch testimage cConflictingProfiles -p dummy -p dummyDup -p dummyNoDup
+
+ # Verify that we can create a container with profiles that have
+ # contradicting root devices if the container has a local root device set.
+ lxc launch testimage cConflictingProfiles2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2" -p dummy -p dummyDup -p dummyNoDup
+
+ # Verify that we cannot remove the local root disk device if the profiles
+ # have contradicting root disk devices.
+ ! lxc config device remove cConflictingProfiles2 root
+
+ # Verify that we can create a container with two profiles that specify the
+ # same root disk device.
+ lxc launch testimage cNonConflictingProfiles -p dummy -p dummyDup
+
+ # Try to remove the root disk device from one of the profiles.
+ lxc profile device remove dummy rootfs1
+
+ # Try to remove the root disk device from the second profile.
+ ! lxc profile device remove dummyDup rootfs1
+
+ # Test that we can't remove the root disk device from the container's config
+ # when the profile it is attached to specifies no root device.
+ for i in $(seq 1 3); do
+ ! lxc config device remove c"${i}" root
+ # Must fail.
+ ! lxc profile assign c"${i}" dummyDup,dummyNoDup
+ done
+
+ lxc delete -f cConflictingProfiles2
+ lxc delete -f cNonConflictingProfiles
+ for i in $(seq 1 3); do
+ lxc delete -f c"${i}"
+ done
+
+ )
+
+ # shellcheck disable=SC2031
+ LXD_DIR="${LXD_DIR}"
+ kill_lxd "${LXD_STORAGE_DIR}"
+}