[lxc-devel] [lxd/master] ceph: add "ceph.osd.force_reuse" property

brauner on Github lxc-bot at linuxcontainers.org
Sat Aug 26 17:48:28 UTC 2017


A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 381 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20170826/cfcbfc20/attachment.bin>
-------------- next part --------------
From 040927aaf43e2f5ea0b5cd56d2f4f91ff055b0c7 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Sat, 26 Aug 2017 19:47:26 +0200
Subject: [PATCH] ceph: add "ceph.osd.force_reuse" property

Closes #3716.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
 lxd/storage_ceph.go         | 16 ++++++++++++++--
 lxd/storage_pools_config.go |  5 +++--
 2 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 849ce2a38..c0410a385 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -225,7 +225,7 @@ func (s *storageCeph) StoragePoolCreate() error {
 		}
 	}()
 
-	ok := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, s.pool.Name,
+	ok := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, s.OSDPoolName,
 		"lxd", s.UserName)
 	s.pool.Config["volatile.pool.pristine"] = "false"
 	if !ok {
@@ -234,7 +234,7 @@ func (s *storageCeph) StoragePoolCreate() error {
 		// this to detect whether this osd pool is already in use by
 		// another LXD instance.
 		err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName,
-			s.pool.Name, "lxd", "0", s.UserName)
+			s.OSDPoolName, "lxd", "0", s.UserName)
 		if err != nil {
 			logger.Errorf(`Failed to create RBD storage volume `+
 				`"%s" on storage pool "%s": %s`, s.pool.Name,
@@ -243,6 +243,18 @@ func (s *storageCeph) StoragePoolCreate() error {
 		}
 		logger.Debugf(`Created RBD storage volume "%s" on storage `+
 			`pool "%s"`, s.pool.Name, s.pool.Name)
+	} else {
+		msg := fmt.Sprintf(`CEPH OSD storage pool "%s" in cluster `+
+			`"%s" seems to be in use by another LXD instance`,
+			s.pool.Name, s.ClusterName)
+		if s.pool.Config["ceph.osd.force_reuse"] == "" ||
+			!shared.IsTrue(s.pool.Config["ceph.osd.force_reuse"]) {
+			msg += `. Set "ceph.osd.force_reuse=true" to force ` +
+				`LXD to reuse the pool`
+			logger.Errorf(msg)
+			return fmt.Errorf(msg)
+		}
+		logger.Warnf(msg)
 	}
 
 	logger.Infof(`Created CEPH OSD storage pool "%s" in cluster "%s"`,
diff --git a/lxd/storage_pools_config.go b/lxd/storage_pools_config.go
index cc429186a..948cf93de 100644
--- a/lxd/storage_pools_config.go
+++ b/lxd/storage_pools_config.go
@@ -18,8 +18,9 @@ var storagePoolConfigKeys = map[string]func(value string) error{
 	"btrfs.mount_options": shared.IsAny,
 
 	// valid drivers: ceph
-	"ceph.cluster_name":  shared.IsAny,
-	"ceph.osd.pool_name": shared.IsAny,
+	"ceph.cluster_name":    shared.IsAny,
+	"ceph.osd.force_reuse": shared.IsBool,
+	"ceph.osd.pool_name":   shared.IsAny,
 	"ceph.osd.pg_num": func(value string) error {
 		if value == "" {
 			return nil


More information about the lxc-devel mailing list