[lxc-devel] [lxd/master] Support two-phase creation of a storage pool on single-node cluster
freeekanayaka on Github <lxc-bot at linuxcontainers.org>
Thu May 7 10:23:26 UTC 2020
From 70251c86f69544d4fba19532b69b9229ac74812a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 7 May 2020 11:21:07 +0100
Subject: [PATCH] Support two-phase creation of a storage pool on single-node
cluster
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
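Note for reviewers: a minimal sketch of the two creation flows this patch accepts on a single-node cluster, mirroring the new test below (pool name "pool1", driver "dir" and member name "node1" are illustrative):

  # Two-stage flow, as required on multi-node clusters: first define the
  # pending, node-specific part of the pool, then finalize it cluster-wide.
  lxc storage create pool1 dir --target node1
  lxc storage create pool1 dir

  # Direct flow, as on a non-clustered daemon: create the pool in one step.
  lxc storage create pool1 dir
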
lxd/storage_pools.go | 28 ++++++++++++-----
test/suites/clustering.sh | 66 +++++++++++++++++++++++++++++++++++++++
2 files changed, 86 insertions(+), 8 deletions(-)
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 674288011e..ca4a838e1a 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -138,15 +138,27 @@ func storagePoolsPost(d *Daemon, r *http.Request) response.Response {
 	}
 
 	if count == 1 {
-		// No targetNode was specified and we're either a single-node
-		// cluster or not clustered at all, so create the storage
-		// pool immediately.
-		err = storagePoolCreateGlobal(d.State(), req)
-	} else {
-		// No targetNode was specified and we're clustered, so finalize the
-		// config in the db and actually create the pool on all nodes.
-		err = storagePoolsPostCluster(d, req)
+		// No targetNode was specified and we're either a
+		// single-node cluster or not clustered at all, so
+		// create the storage pool immediately, unless there's
+		// a pending storage pool (in that case we follow the
+		// regular two-stage process).
+		_, err := d.cluster.GetStoragePoolID(req.Name)
+		if err != nil {
+			if err != db.ErrNoSuchObject {
+				return response.InternalError(err)
+			}
+			err = storagePoolCreateGlobal(d.State(), req)
+			if err != nil {
+				return response.InternalError(err)
+			}
+			return resp
+		}
 	}
+
+	// No targetNode was specified and we're clustered, so finalize the
+	// config in the db and actually create the pool on all nodes.
+	err = storagePoolsPostCluster(d, req)
 	if err != nil {
 		return response.InternalError(err)
 	}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 0f3f6293e9..8d8eb0e401 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -1802,3 +1802,69 @@ test_clustering_remove_raft_node() {
   kill_lxd "${LXD_THREE_DIR}"
   kill_lxd "${LXD_FOUR_DIR}"
 }
+
+# On a single-node cluster storage pools can be created either with the
+# two-stage process required for multi-node clusters, or directly with the
+# normal procedure for non-clustered daemons.
+test_clustering_storage_single_node() {
+  # shellcheck disable=2039
+  local LXD_DIR
+
+  setup_clustering_bridge
+  prefix="lxd$$"
+  bridge="${prefix}"
+
+  # The random storage backend is not supported in clustering tests,
+  # since we need to have the same storage driver on all nodes.
+  driver="${LXD_BACKEND}"
+  if [ "${driver}" = "random" ] || [ "${driver}" = "lvm" ]; then
+    driver="dir"
+  fi
+
+  setup_clustering_netns 1
+  LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_ONE_DIR}"
+  ns1="${prefix}1"
+  spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}" "${driver}"
+
+  # Create a pending storage pool on the node.
+  driver_config=""
+  if [ "${driver}" = "btrfs" ]; then
+    driver_config="size=20GB"
+  fi
+  if [ "${driver}" = "zfs" ]; then
+    driver_config="size=20GB"
+  fi
+  if [ "${driver}" = "ceph" ]; then
+    driver_config="source=lxdtest-$(basename "${TEST_DIR}")-pool1"
+  fi
+  driver_config_node="${driver_config}"
+  if [ "${driver}" = "zfs" ]; then
+    driver_config_node="${driver_config_node} zfs.pool_name=pool1-$(basename "${TEST_DIR}")-${ns1}"
+  fi
+
+  if [ -n "${driver_config_node}" ]; then
+    # shellcheck disable=SC2086
+    LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 "${driver}" ${driver_config_node} --target node1
+  else
+    LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 "${driver}" --target node1
+  fi
+
+  # Finalize the storage pool creation
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 "${driver}"
+
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep status: | grep -q Created
+
+  # Delete the storage pool
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage delete pool1
+
+  # LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
+  sleep 0.5
+  rm -f "${LXD_ONE_DIR}/unix.socket"
+
+  teardown_clustering_netns
+  teardown_clustering_bridge
+
+  kill_lxd "${LXD_ONE_DIR}"
+}
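
For completeness, the same two-phase flow sketched at the REST level, assuming the default non-snap unix socket path and the illustrative member name "node1"; the ?target= query parameter selects the pending, node-specific phase:

  # Phase 1: define the node-specific (pending) part of the pool.
  curl -s --unix-socket /var/lib/lxd/unix.socket -X POST \
    "lxd/1.0/storage-pools?target=node1" \
    --data '{"name": "pool1", "driver": "dir"}'

  # Phase 2: finalize the pool across the (single-member) cluster.
  curl -s --unix-socket /var/lib/lxd/unix.socket -X POST \
    lxd/1.0/storage-pools \
    --data '{"name": "pool1", "driver": "dir"}'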