[lxc-devel] [lxd/master] lxd/instance: Validate that pools and networks are not pending

freeekanayaka on GitHub <lxc-bot at linuxcontainers.org>
Tue Jun 9 11:36:59 UTC 2020


From 17a8dbf3b35cf625da83075c733c632909638141 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 9 Jun 2020 12:35:06 +0100
Subject: [PATCH] lxd/instance: Validate that pools and networks are not
 pending

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/storage_pools.go      |  2 ++
 lxd/instance/drivers/load.go | 27 +++++++++++++++++++++++++++
 test/suites/clustering.sh    | 19 ++++++++++++-------
 3 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index f7c6d7a308..602de11927 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -510,6 +510,8 @@ func (c *Cluster) GetStoragePool(poolName string) (int64, *api.StoragePool, erro
 		storagePool.Status = "Pending"
 	case storagePoolCreated:
 		storagePool.Status = "Created"
+	case storagePoolErrored:
+		storagePool.Status = "Errored"
 	default:
 		storagePool.Status = "Unknown"
 	}
diff --git a/lxd/instance/drivers/load.go b/lxd/instance/drivers/load.go
index b534365fd0..75eb7d926a 100644
--- a/lxd/instance/drivers/load.go
+++ b/lxd/instance/drivers/load.go
@@ -81,6 +81,33 @@ func validDevices(state *state.State, cluster *db.Cluster, instanceType instance
 		if err != nil {
 			return errors.Wrap(err, "Failed detecting root disk device")
 		}
+
+		for deviceName, device := range devices {
+			if device["type"] == "disk" && device["pool"] != "" {
+				poolName := device["pool"]
+				_, pool, err := cluster.GetStoragePool(poolName)
+				if err != nil {
+					return errors.Wrapf(err, "Fetch storage pool %q", poolName)
+				}
+				if pool.Status != "Created" {
+					return fmt.Errorf(
+						"Device %q: storage pool %q is in %s state",
+						deviceName, poolName, pool.Status)
+				}
+			}
+			if device["type"] == "nic" && device["parent"] != "" {
+				networkName := device["parent"]
+				_, network, err := cluster.GetNetwork(networkName)
+				if err != nil {
+					return errors.Wrapf(err, "Fetch network %q", networkName)
+				}
+				if network.Status != "Created" {
+					return fmt.Errorf(
+						"Device %q: network %q is in %s state",
+						deviceName, networkName, network.Status)
+				}
+			}
+		}
 	}
 
 	return nil
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 0e9d7209e5..bedcb3bc1a 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -398,11 +398,16 @@ test_clustering_storage() {
   fi
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep status: | grep -q Pending
 
+  LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
+
   # The source config key is not legal for the final pool creation
   if [ "${driver}" = "dir" ]; then
     ! LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir source=/foo || false
   fi
 
+  # A container can't be backed by a pending pool.
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 -s pool1 testimage bar || false
+
   # Create the storage pool
   if [ "${driver}" = "lvm" ]; then
       LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 "${driver}" volume.size=25MB
@@ -436,7 +441,6 @@ test_clustering_storage() {
 
   if [ "${driver}" = "ceph" ]; then
     # Test migration of ceph-based containers
-    LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
     LXD_DIR="${LXD_ONE_DIR}" lxc launch --target node2 -s pool1 testimage foo
 
     # The container can't be moved if it's running
@@ -512,14 +516,11 @@ test_clustering_storage() {
     LXD_DIR="${LXD_ONE_DIR}" lxc storage volume delete pool1 v1
     LXD_DIR="${LXD_ONE_DIR}" lxc delete baz
     LXD_DIR="${LXD_ONE_DIR}" lxc delete buz
-
-    LXD_DIR="${LXD_ONE_DIR}" lxc image delete testimage
   fi
 
   # Test migration of zfs/btrfs-based containers
   if [ "${driver}" = "zfs" ] || [ "${driver}" = "btrfs" ]; then
     # Launch a container on node2
-    LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
     LXD_DIR="${LXD_ONE_DIR}" lxc launch --target node2 testimage foo
     LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Location: node2"
 
@@ -548,9 +549,6 @@ test_clustering_storage() {
     # Purge the containers
     LXD_DIR="${LXD_ONE_DIR}" lxc delete bar
     LXD_DIR="${LXD_ONE_DIR}" lxc delete foo
-
-    # Delete the image too.
-    LXD_DIR="${LXD_ONE_DIR}" lxc image delete testimage
   fi
 
   # Delete the storage pool
@@ -593,6 +591,8 @@ test_clustering_storage() {
     LXD_DIR="${LXD_TWO_DIR}" lxc storage volume delete data webbaz
   fi
 
+  LXD_DIR="${LXD_ONE_DIR}" lxc image delete testimage
+
   printf 'config: {}\ndevices: {}' | LXD_DIR="${LXD_ONE_DIR}" lxc profile edit default
   LXD_DIR="${LXD_TWO_DIR}" lxc storage delete data
 
@@ -732,6 +732,11 @@ test_clustering_network() {
   ! LXD_DIR="${LXD_ONE_DIR}" lxc network create "${net}" --target node2 || false
   LXD_DIR="${LXD_ONE_DIR}" lxc network show "${net}" | grep status: | grep -q Pending
 
+  # A container can't be associated with a pending network.
+  LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 -n "${net}" testimage bar || false
+  LXD_DIR="${LXD_ONE_DIR}" lxc image delete testimage
+
   # The bridge.external_interfaces config key is not legal for the final network creation
   ! LXD_DIR="${LXD_ONE_DIR}" lxc network create "${net}" bridge.external_interfaces=foo || false
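
For readers skimming the patch, the core of the new check in lxd/instance/drivers/load.go boils down to the pattern below. This is a minimal standalone sketch, not the actual LXD code: the deviceConfig type, the checkDeviceBackingReady name and the lookupStatus callback are simplified stand-ins for the real *db.Cluster lookups (GetStoragePool / GetNetwork) used in validDevices().

// Minimal standalone sketch of the validation added to validDevices() in
// lxd/instance/drivers/load.go. The deviceConfig type, the function name and
// the lookupStatus callback are simplified stand-ins, not the real LXD API.
package main

import "fmt"

type deviceConfig map[string]string

// checkDeviceBackingReady rejects a device that references a storage pool or
// network whose clustering status is not "Created" (i.e. still Pending or
// Errored on some node).
func checkDeviceBackingReady(name string, device deviceConfig, lookupStatus func(kind, target string) (string, error)) error {
	switch {
	case device["type"] == "disk" && device["pool"] != "":
		status, err := lookupStatus("storage pool", device["pool"])
		if err != nil {
			return fmt.Errorf("Fetch storage pool %q: %w", device["pool"], err)
		}
		if status != "Created" {
			return fmt.Errorf("Device %q: storage pool %q is in %s state", name, device["pool"], status)
		}
	case device["type"] == "nic" && device["parent"] != "":
		status, err := lookupStatus("network", device["parent"])
		if err != nil {
			return fmt.Errorf("Fetch network %q: %w", device["parent"], err)
		}
		if status != "Created" {
			return fmt.Errorf("Device %q: network %q is in %s state", name, device["parent"], status)
		}
	}
	return nil
}

func main() {
	// Pretend pool1 is still Pending on the target node, as in the new
	// clustering.sh test case above.
	lookup := func(kind, target string) (string, error) { return "Pending", nil }

	err := checkDeviceBackingReady("root", deviceConfig{"type": "disk", "path": "/", "pool": "pool1"}, lookup)
	fmt.Println(err) // Device "root": storage pool "pool1" is in Pending state
}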
 

