[lxc-devel] [lxd/master] Fix cephfs cluster handling

stgraber on Github lxc-bot at linuxcontainers.org
Sat Sep 14 23:11:18 UTC 2019


From ffe4997711e2f8903fa8f7d00354beed6ec3482a Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Sun, 15 Sep 2019 00:24:06 +0200
Subject: [PATCH 1/2] lxd/init: Properly handle ceph/cephfs

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/main_init.go             |  8 ++++++--
 lxd/main_init_interactive.go | 16 ++++++++++++----
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index de793b6254..a0a26cb157 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -165,11 +165,15 @@ func (c *cmdInit) availableStorageDrivers(poolType string) []string {
 
 	// Check available backends
 	for _, driver := range supportedStoragePoolDrivers {
-		if poolType == "remote" && driver != "ceph" {
+		if poolType == "remote" && !shared.StringInSlice(driver, []string{"ceph", "cephfs"}) {
 			continue
 		}
 
-		if poolType == "local" && driver == "ceph" {
+		if poolType == "local" && shared.StringInSlice(driver, []string{"ceph", "cephfs"}) {
+			continue
+		}
+
+		if poolType == "all" && driver == "cephfs" {
 			continue
 		}
 
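For reference, the filter above now treats ceph and cephfs as the remote-only drivers, while cephfs stays out of the combined "all" list because it only backs custom storage volumes, not instance root disks. A minimal standalone sketch of the same selection logic (stringInSlice stands in for shared.StringInSlice, and the supported driver list is illustrative):

package main

import "fmt"

// stringInSlice mirrors shared.StringInSlice from the LXD tree.
func stringInSlice(key string, list []string) bool {
	for _, entry := range list {
		if entry == key {
			return true
		}
	}
	return false
}

// availableDrivers reproduces the filtering from availableStorageDrivers.
func availableDrivers(poolType string, supported []string) []string {
	drivers := []string{}
	for _, driver := range supported {
		// ceph and cephfs require a remote pool.
		if poolType == "remote" && !stringInSlice(driver, []string{"ceph", "cephfs"}) {
			continue
		}
		if poolType == "local" && stringInSlice(driver, []string{"ceph", "cephfs"}) {
			continue
		}
		// cephfs only holds custom volumes, so hide it from the combined list.
		if poolType == "all" && driver == "cephfs" {
			continue
		}
		drivers = append(drivers, driver)
	}
	return drivers
}

func main() {
	supported := []string{"btrfs", "ceph", "cephfs", "dir", "zfs"} // illustrative
	fmt.Println(availableDrivers("remote", supported)) // [ceph cephfs]
	fmt.Println(availableDrivers("local", supported))  // [btrfs dir zfs]
	fmt.Println(availableDrivers("all", supported))    // [btrfs ceph dir zfs]
}
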
diff --git a/lxd/main_init_interactive.go b/lxd/main_init_interactive.go
index 3abb254241..f3947ed526 100644
--- a/lxd/main_init_interactive.go
+++ b/lxd/main_init_interactive.go
@@ -456,10 +456,12 @@ func (c *cmdInit) askStoragePool(config *cmdInitData, d lxd.InstanceServer, pool
 		}
 
 		// Add to the default profile
-		config.Node.Profiles[0].Devices["root"] = map[string]string{
-			"type": "disk",
-			"path": "/",
-			"pool": pool.Name,
+		if config.Node.Profiles[0].Devices["root"] == nil {
+			config.Node.Profiles[0].Devices["root"] = map[string]string{
+				"type": "disk",
+				"path": "/",
+				"pool": pool.Name,
+			}
 		}
 
 		// Storage backend
@@ -502,6 +504,12 @@ func (c *cmdInit) askStoragePool(config *cmdInitData, d lxd.InstanceServer, pool
 
 				// Ask for the number of placement groups
 				pool.Config["ceph.osd.pg_num"] = cli.AskString("Number of placement groups [default=32]: ", "32", nil)
+			} else if pool.Driver == "cephfs" {
+				// Ask for the name of the cluster
+				pool.Config["cephfs.cluster_name"] = cli.AskString("Name of the existing CEPHfs cluster [default=ceph]: ", "ceph", nil)
+
+				// Ask for the name of the volume
+				pool.Config["source"] = cli.AskString("Name of the CEPHfs volume: ", "", nil)
 			} else if cli.AskBool("Would you like to use an existing block device? (yes/no) [default=no]: ", "no") {
 				deviceExists := func(path string) error {
 					if !shared.IsBlockdevPath(path) {

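Two behaviors change in the interactive flow above: the default profile only gets a root disk device when one is not already defined, so an existing entry is no longer overwritten, and a cephfs pool now prompts for the cluster name and the volume to use. A short sketch of the pool config those prompts produce, with hypothetical answers "ceph" and "lxd-volume":

package main

import "fmt"

func main() {
	// Hypothetical answers to the two new prompts.
	clusterName := "ceph"      // Name of the existing CEPHfs cluster
	volumeName := "lxd-volume" // Name of the CEPHfs volume

	// The keys match what askStoragePool stores for a cephfs pool.
	poolConfig := map[string]string{
		"cephfs.cluster_name": clusterName,
		"source":              volumeName,
	}
	fmt.Println(poolConfig)
}
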
From 8991da5bc6a0f4ee17106a7ca86a7dd766f86a63 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Sun, 15 Sep 2019 01:09:56 +0200
Subject: [PATCH 2/2] lxd/storage/cephfs: Fix querying volume on cluster

Closes: #6159

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/cluster/connect.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/cluster/connect.go b/lxd/cluster/connect.go
index fb8634f0ad..6f26255894 100644
--- a/lxd/cluster/connect.go
+++ b/lxd/cluster/connect.go
@@ -84,7 +84,7 @@ func ConnectIfVolumeIsRemote(cluster *db.Cluster, poolID int64, volumeName strin
 			return nil, err
 		}
 
-		if driver == "ceph" {
+		if driver == "ceph" || driver == "cephfs" {
 			return nil, nil
 		}
 
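Returning nil, nil from ConnectIfVolumeIsRemote means no forwarded connection is needed: ceph and cephfs volumes are visible from every cluster member, so the request is served locally. A standalone sketch of that dispatch decision (needsForwarding is an illustrative name, not an LXD function):

package main

import "fmt"

// needsForwarding sketches the decision in ConnectIfVolumeIsRemote:
// volumes on shared drivers are reachable from any member, so the
// request is handled locally rather than forwarded to another node.
func needsForwarding(driver string, volumeIsOnThisNode bool) bool {
	if driver == "ceph" || driver == "cephfs" {
		return false // shared across the cluster
	}
	return !volumeIsOnThisNode
}

func main() {
	fmt.Println(needsForwarding("cephfs", false)) // false: serve locally
	fmt.Println(needsForwarding("dir", false))    // true: forward to the owner
	fmt.Println(needsForwarding("dir", true))     // false: already local
}
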

