[lxc-devel] [lxd/master] Attach already provisioned ceph rbd/fs to containers
abbykrish on Github
lxc-bot at linuxcontainers.org
Tue Nov 12 22:44:34 UTC 2019
From 00b9bd707e2ca836019a2b33415f3e8dcb2bf3da Mon Sep 17 00:00:00 2001
From: Rishabh Thakkar <rishabh.thakkar at gmail.com>
Date: Tue, 29 Oct 2019 17:01:36 -0500
Subject: [PATCH 1/9] api: Add container_disk_ceph API extension
Signed-off-by: Rishabh Thakkar <rishabh.thakkar at gmail.com>
---
doc/api-extensions.md | 3 +++
shared/version/api.go | 1 +
2 files changed, 4 insertions(+)
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index e33d9ecced..1456c7e876 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -866,3 +866,6 @@ Adds the `security.syscalls.intercept.mount`,
`security.syscalls.intercept.mount.shift` configuration keys to control whether
and how the mount system call will be intercepted by LXD and processed with
elevated permissions.
+
+## container\_disk\_ceph
+This allows for an existing Ceph RBD or CephFS filesystem to be directly attached to a LXD container.
\ No newline at end of file
diff --git a/shared/version/api.go b/shared/version/api.go
index f6f64cb741..749fa2deef 100644
--- a/shared/version/api.go
+++ b/shared/version/api.go
@@ -173,6 +173,7 @@ var APIExtensions = []string{
"backup_compression_algorithm",
"ceph_data_pool_name",
"container_syscall_intercept_mount",
+ "container_disk_ceph",
}
// APIExtensionsCount returns the number of available API extensions.
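For context, this extension allows a pre-existing Ceph RBD volume or CephFS filesystem to be attached straight to a container as a disk device. A sketch of the intended usage, mirroring the commands in the test suite added later in this series (the pool, volume, and container names are placeholders):

  lxc config device add ceph-disk rbd disk source=ceph:my-pool/my-volume ceph.user_name=admin ceph.cluster_name=ceph path=/ceph
  lxc config device add ceph-fs fs disk source=cephfs:my-fs/ ceph.user_name=admin ceph.cluster_name=ceph path=/cephfs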
From 0a2d7d443b65acee2a2b01aea45b3c3fe1a4cc1b Mon Sep 17 00:00:00 2001
From: Rishabh Thakkar <rishabh.thakkar at gmail.com>
Date: Thu, 31 Oct 2019 18:19:53 -0500
Subject: [PATCH 2/9] lxd: Updated device utils with functions that lift logic
from storage_cephfs and ceph_utils and perform the rbdMap, rbdUnmap, and
mountfs
Signed-off-by: Rishabh Thakkar <rishabh.thakkar at gmail.com>
---
lxd/device/device_utils_disk.go | 186 ++++++++++++++++++++++++++++++++
1 file changed, 186 insertions(+)
diff --git a/lxd/device/device_utils_disk.go b/lxd/device/device_utils_disk.go
index c6545869fd..faa2c98b3b 100644
--- a/lxd/device/device_utils_disk.go
+++ b/lxd/device/device_utils_disk.go
@@ -1,8 +1,16 @@
package device
import (
+ "bufio"
"fmt"
+ "github.com/lxc/lxd/lxd/db"
+ driver "github.com/lxc/lxd/lxd/storage"
+ "github.com/lxc/lxd/shared/logger"
+ "os"
+ "os/exec"
"strings"
+ "syscall"
+ "time"
"golang.org/x/sys/unix"
@@ -117,3 +125,181 @@ func DiskMount(srcPath string, dstPath string, readonly bool, recursive bool, pr
return nil
}
+
+// diskCephRbdMap maps an RBD volume via "rbd map" and returns the resulting device path.
+func diskCephRbdMap(clusterName string, userName string, poolName string, volumeName string) (string, error) {
+ devPath, err := shared.RunCommand(
+ "rbd",
+ "--id", userName,
+ "--cluster", clusterName,
+ "--pool", poolName,
+ "map",
+ fmt.Sprintf("%s_%s", db.StoragePoolVolumeTypeNameCustom, volumeName))
+ if err != nil {
+ return "", err
+ }
+
+ idx := strings.Index(devPath, "/dev/rbd")
+ if idx < 0 {
+ return "", fmt.Errorf("Failed to detect mapped device path")
+ }
+
+ devPath = devPath[idx:]
+ return strings.TrimSpace(devPath), nil
+}
+
+// diskCephRbdUnmap unmaps an RBD volume via "rbd unmap", retrying while the device is still busy.
+func diskCephRbdUnmap(clusterName string, userName string, poolName string, deviceName string, unmapUntilEINVAL bool) error {
+ unmapImageName := fmt.Sprintf("%s_%s", db.StoragePoolVolumeTypeNameCustom, deviceName)
+
+ busyCount := 0
+
+again:
+ _, err := shared.RunCommand(
+ "rbd",
+ "--id", userName,
+ "--cluster", clusterName,
+ "--pool", poolName,
+ "unmap",
+ unmapImageName)
+ if err != nil {
+ runError, ok := err.(shared.RunError)
+ if ok {
+ exitError, ok := runError.Err.(*exec.ExitError)
+ if ok {
+ waitStatus := exitError.Sys().(syscall.WaitStatus)
+ if waitStatus.ExitStatus() == 22 {
+ // EINVAL (already unmapped)
+ return nil
+ }
+
+ if waitStatus.ExitStatus() == 16 {
+ // EBUSY (currently in use)
+ busyCount++
+ if busyCount == 10 {
+ return err
+ }
+
+ // Wait a second and try again
+ time.Sleep(time.Second)
+ goto again
+ }
+ }
+ }
+
+ return err
+ }
+
+ if unmapUntilEINVAL {
+ goto again
+ }
+
+ return nil
+}
+
+// cephFsConfig parses the cluster's ceph.conf and keyring to extract the monitor addresses and client secret.
+func cephFsConfig(clusterName string, userName string) ([]string, string, error) {
+ // Parse the CEPH configuration
+ cephConf, err := os.Open(fmt.Sprintf("/etc/ceph/%s.conf", clusterName))
+ if err != nil {
+ return nil, "", err
+ }
+
+ cephMon := []string{}
+
+ scan := bufio.NewScanner(cephConf)
+ for scan.Scan() {
+ line := scan.Text()
+ line = strings.TrimSpace(line)
+
+ if line == "" {
+ continue
+ }
+
+ if strings.HasPrefix(line, "mon_host") {
+ fields := strings.SplitN(line, "=", 2)
+ if len(fields) < 2 {
+ continue
+ }
+
+ servers := strings.Split(fields[1], ",")
+ for _, server := range servers {
+ cephMon = append(cephMon, strings.TrimSpace(server))
+ }
+ break
+ }
+ }
+
+ if len(cephMon) == 0 {
+ return nil, "", fmt.Errorf("Couldn't find a CEPH mon")
+ }
+
+ // Parse the CEPH keyring
+ cephKeyring, err := os.Open(fmt.Sprintf("/etc/ceph/%v.client.%v.keyring", clusterName, userName))
+ if err != nil {
+ return nil, "", err
+ }
+
+ var cephSecret string
+
+ scan = bufio.NewScanner(cephKeyring)
+ for scan.Scan() {
+ line := scan.Text()
+ line = strings.TrimSpace(line)
+
+ if line == "" {
+ continue
+ }
+
+ if strings.HasPrefix(line, "key") {
+ fields := strings.SplitN(line, "=", 2)
+ if len(fields) < 2 {
+ continue
+ }
+
+ cephSecret = strings.TrimSpace(fields[1])
+ break
+ }
+ }
+
+ if cephSecret == "" {
+ return nil, "", fmt.Errorf("Couldn't find a keyring entry")
+ }
+
+ return cephMon, cephSecret, nil
+}
+
+// diskCephfsMount mounts a CephFS filesystem at the given host path.
+func diskCephfsMount(clusterName string, userName string, fsName string, path string) error {
+ logger.Debugf("Mounting CEPHFS")
+ // Parse the namespace / path
+ fields := strings.SplitN(fsName, "/", 2)
+ fsName = fields[0]
+ fsPath := "/"
+ if len(fields) > 1 {
+ fsPath = fields[1]
+ }
+
+ // Get the credentials and host
+ monAddresses, secret, err := cephFsConfig(clusterName, userName)
+ if err != nil {
+ return err
+ }
+
+ // Do the actual mount
+ connected := false
+ for _, monAddress := range monAddresses {
+ uri := fmt.Sprintf("%s:6789:/%s", monAddress, fsPath)
+ err = driver.TryMount(uri, path, "ceph", 0, fmt.Sprintf("name=%v,secret=%v,mds_namespace=%v", userName, secret, fsName))
+ if err != nil {
+ continue
+ }
+
+ connected = true
+ break
+ }
+
+ if !connected {
+ return err
+ }
+
+ logger.Debugf("Mounted CEPHFS")
+
+ return nil
+}
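A rough sketch of how these helpers are intended to chain together (the call sites are wired up in the following patches; the cluster, user, pool, and volume names here are illustrative):

  // Map an RBD image; the helper maps "custom_<volume>" and returns the
  // kernel device path, which can then be mounted like any block device.
  devPath, err := diskCephRbdMap("ceph", "admin", "my-pool", "my-volume")
  if err != nil {
          return err
  }
  logger.Debugf("Mapped RBD at %s", devPath)

  // Mount a CephFS filesystem (optionally a subpath, "my-fs/sub/dir")
  // at an existing host path.
  err = diskCephfsMount("ceph", "admin", "my-fs", "/mnt/cephfs")

  // Unmap the RBD image when done, retrying on EBUSY and, with the final
  // argument set, looping until the kernel reports EINVAL (already unmapped).
  err = diskCephRbdUnmap("ceph", "admin", "my-pool", "my-volume", true)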
From 527f191ebe1afe5cd5d65e9ab1d123967cd37e0d Mon Sep 17 00:00:00 2001
From: anusha-paul <anusha.paul at utexas.edu>
Date: Fri, 1 Nov 2019 17:23:12 -0500
Subject: [PATCH 3/9] figured out where to call methods to mount ceph and
cephfs and where to unmap
---
lxd/device/disk.go | 89 ++++++++++++++++++++++++++++------------------
1 file changed, 55 insertions(+), 34 deletions(-)
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index 853598006b..e5d4aa5151 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -511,6 +511,12 @@ func (d *disk) createDevice() (string, error) {
isFile := false
if d.config["pool"] == "" {
isFile = !shared.IsDir(srcPath) && !IsBlockdev(srcPath)
+ //handle ceph case (anusha)
+ if (d.config['source'] == "cephfs") {
+ //filesystem mount
+ //diskCephfsMount(clusterName string, userName string, fsName string, path string) error
+ }
+
} else {
// Deal with mounting storage volumes created via the storage api. Extract the name
// of the storage volume that we are supposed to attach. We assume that the only
@@ -524,41 +530,51 @@ func (d *disk) createDevice() (string, error) {
return "", fmt.Errorf("When the \"pool\" property is set \"source\" must specify the name of a volume, not a path")
}
- volumeTypeName := ""
- volumeName := filepath.Clean(d.config["source"])
- slash := strings.Index(volumeName, "/")
- if (slash > 0) && (len(volumeName) > slash) {
- // Extract volume name.
- volumeName = d.config["source"][(slash + 1):]
- // Extract volume type.
- volumeTypeName = d.config["source"][:slash]
- }
-
- switch volumeTypeName {
- case db.StoragePoolVolumeTypeNameContainer:
- return "", fmt.Errorf("Using container storage volumes is not supported")
- case "":
- // We simply received the name of a storage volume.
- volumeTypeName = db.StoragePoolVolumeTypeNameCustom
- fallthrough
- case db.StoragePoolVolumeTypeNameCustom:
- srcPath = shared.VarPath("storage-pools", d.config["pool"], volumeTypeName, volumeName)
- case db.StoragePoolVolumeTypeNameImage:
- return "", fmt.Errorf("Using image storage volumes is not supported")
- default:
- return "", fmt.Errorf("Unknown storage type prefix \"%s\" found", volumeTypeName)
- }
-
- err := StorageVolumeMount(d.state, d.config["pool"], volumeName, volumeTypeName, d.instance)
- if err != nil {
- msg := fmt.Sprintf("Could not mount storage volume \"%s\" of type \"%s\" on storage pool \"%s\": %s.", volumeName, volumeTypeName, d.config["pool"], err)
- if !isRequired {
- // Will fail the PathExists test below.
- logger.Warn(msg)
- } else {
- return "", fmt.Errorf(msg)
+ if (d.config["source"] == "ceph") {
+ // get pool name, volume name, ceph.user_name, and ceph.cluster_name from d.config and make call to map
+ // after call to map, save the src path it returned in variable src_path
+ // d.volatileSet(map[string]string{"ceph_rbd_src_path": src_path})
+ //diskCephRbdMap(clusterName string, userName string, poolName string, volumeName string) (string, error)
+ }
+ else {
+ volumeTypeName := ""
+ volumeName := filepath.Clean(d.config["source"])
+ slash := strings.Index(volumeName, "/")
+ if (slash > 0) && (len(volumeName) > slash) {
+ // Extract volume name.
+ volumeName = d.config["source"][(slash + 1):]
+ // Extract volume type.
+ volumeTypeName = d.config["source"][:slash]
+ }
+
+ switch volumeTypeName {
+ case db.StoragePoolVolumeTypeNameContainer:
+ return "", fmt.Errorf("Using container storage volumes is not supported")
+ case "":
+ // We simply received the name of a storage volume.
+ volumeTypeName = db.StoragePoolVolumeTypeNameCustom
+ fallthrough
+ case db.StoragePoolVolumeTypeNameCustom:
+ srcPath = shared.VarPath("storage-pools", d.config["pool"], volumeTypeName, volumeName)
+ case db.StoragePoolVolumeTypeNameImage:
+ return "", fmt.Errorf("Using image storage volumes is not supported")
+ default:
+ return "", fmt.Errorf("Unknown storage type prefix \"%s\" found", volumeTypeName)
+ }
+
+ err := StorageVolumeMount(d.state, d.config["pool"], volumeName, volumeTypeName, d.instance)
+ if err != nil {
+ msg := fmt.Sprintf("Could not mount storage volume \"%s\" of type \"%s\" on storage pool \"%s\": %s.", volumeName, volumeTypeName, d.config["pool"], err)
+ if !isRequired {
+ // Will fail the PathExists test below.
+ logger.Warn(msg)
+ } else {
+ return "", fmt.Errorf(msg)
+ }
}
}
+
+
}
// Check if the source exists.
@@ -640,7 +656,12 @@ func (d *disk) postStop() error {
if err != nil {
return err
}
-
+ }
+ if d.config["source"] == "ceph" {
+ //unmap rbd storage from path
+ //get the map with v := d.volatileGet
+ //get the actual path with v[ceph_rbd_src_path]
+ //diskCephRbdUnmap(deviceName string) error
}
devPath := d.getDevicePath(d.name, d.config)
From 6cae22802cbf198d1182b7165da9c678aae4d314 Mon Sep 17 00:00:00 2001
From: Rishabh Thakkar <rishabh.thakkar at gmail.com>
Date: Tue, 5 Nov 2019 17:58:37 -0600
Subject: [PATCH 4/9] lxd: Added calls to util functions created that un/map
RBD and mount CephFS
Signed-off-by: Rishabh Thakkar <rishabh.thakkar at gmail.com>
---
lxd/device/device_utils_disk.go | 14 ++------
lxd/device/disk.go | 59 +++++++++++++++++++++++++--------
2 files changed, 47 insertions(+), 26 deletions(-)
diff --git a/lxd/device/device_utils_disk.go b/lxd/device/device_utils_disk.go
index faa2c98b3b..ef5e48acb6 100644
--- a/lxd/device/device_utils_disk.go
+++ b/lxd/device/device_utils_disk.go
@@ -147,17 +147,12 @@ func diskCephRbdMap(clusterName string, userName string, poolName string, volume
return strings.TrimSpace(devPath), nil
}
-func diskCephRbdUnmap(clusterName string, userName string, poolName string, deviceName string, unmapUntilEINVAL bool) error {
+func diskCephRbdUnmap(deviceName string) error {
unmapImageName := fmt.Sprintf("%s_%s", db.StoragePoolVolumeTypeNameCustom, deviceName)
-
busyCount := 0
-
again:
_, err := shared.RunCommand(
"rbd",
- "--id", userName,
- "--cluster", clusterName,
- "--pool", poolName,
"unmap",
unmapImageName)
if err != nil {
@@ -187,12 +182,7 @@ again:
return err
}
-
- if unmapUntilEINVAL {
- goto again
- }
-
- return nil
+ goto again
}
func cephFsConfig(clusterName string, userName string) ([]string, string, error) {
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index e5d4aa5151..e2586c519a 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -512,11 +512,47 @@ func (d *disk) createDevice() (string, error) {
if d.config["pool"] == "" {
isFile = !shared.IsDir(srcPath) && !IsBlockdev(srcPath)
//handle ceph case (anusha)
- if (d.config['source'] == "cephfs") {
+ if strings.HasPrefix(d.config["source"], "cephfs") {
//filesystem mount
- //diskCephfsMount(clusterName string, userName string, fsName string, path string) error
+ fields := strings.SplitN(d.config["source"], ":", 2)
+ fsName := fields[1]
+ userName := d.config["ceph.user_name"]
+ clusterName := d.config["ceph.cluster_name"]
+ path := d.config["path"]
+ err := diskCephfsMount(clusterName, userName, fsName, path)
+ if err != nil {
+ msg := fmt.Sprintf("Could not mount Ceph FS: %s.", err)
+ if !isRequired {
+ // Will fail the PathExists test below.
+ logger.Warn(msg)
+ } else {
+ return "", fmt.Errorf(msg)
+ }
+ }
+ } else if strings.HasPrefix(d.config["source"], "ceph") {
+ // get pool name, volume name, ceph.user_name, and ceph.cluster_name from d.config and make call to map
+ // after call to map, save the src path it returned in variable src_path
+ fields := strings.SplitN(d.config["source"], ":", 2)
+ fields = strings.SplitN(fields[1], "/", 2)
+ poolName := fields[0]
+ volumeName := fields[1]
+ userName := d.config["ceph.user_name"]
+ clusterName := d.config["ceph.cluster_name"]
+ src_path, err := diskCephRbdMap(clusterName, userName, poolName, volumeName)
+ if err != nil {
+ msg := fmt.Sprintf("Could not map Ceph RBD: %s.", err)
+ if !isRequired {
+ // Will fail the PathExists test below.
+ logger.Warn(msg)
+ } else {
+ return "", fmt.Errorf(msg)
+ }
+ }
+ err = d.volatileSet(map[string]string{"ceph_rbd_src_path": src_path})
+ if err != nil {
+ return "", err
+ }
}
-
} else {
// Deal with mounting storage volumes created via the storage api. Extract the name
// of the storage volume that we are supposed to attach. We assume that the only
@@ -528,15 +564,7 @@ func (d *disk) createDevice() (string, error) {
if filepath.IsAbs(d.config["source"]) {
return "", fmt.Errorf("When the \"pool\" property is set \"source\" must specify the name of a volume, not a path")
- }
-
- if (d.config["source"] == "ceph") {
- // get pool name, volume name, ceph.user_name, and ceph.cluster_name from d.config and make call to map
- // after call to map, save the src path it returned in variable src_path
- // d.volatileSet(map[string]string{"ceph_rbd_src_path": src_path})
- //diskCephRbdMap(clusterName string, userName string, poolName string, volumeName string) (string, error)
- }
- else {
+ } else {
volumeTypeName := ""
volumeName := filepath.Clean(d.config["source"])
slash := strings.Index(volumeName, "/")
@@ -574,7 +602,6 @@ func (d *disk) createDevice() (string, error) {
}
}
-
}
// Check if the source exists.
@@ -661,7 +688,11 @@ func (d *disk) postStop() error {
//unmap rbd storage from path
//get the map with v := d.volatileGet
//get the actual path with v[ceph_rbd_src_path]
- //diskCephRbdUnmap(deviceName string) error
+ v := d.volatileGet()
+ err := diskCephRbdUnmap(v["ceph_rbd_src_path"])
+ if err != nil {
+ return err
+ }
}
devPath := d.getDevicePath(d.name, d.config)
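With this patch the source option encodes the full mapping information ("ceph:<pool>/<volume>" for RBD, "cephfs:<fsname>[/<path>]" for CephFS), and the mapped device path is carried between hooks through the volatile store. A condensed sketch of that round-trip, using the names from the patch (error handling elided):

  // createDevice: map the image and remember the resulting device path
  src_path, _ := diskCephRbdMap(clusterName, userName, poolName, volumeName)
  _ = d.volatileSet(map[string]string{"ceph_rbd_src_path": src_path})

  // postStop: recover the path and unmap
  v := d.volatileGet()
  _ = diskCephRbdUnmap(v["ceph_rbd_src_path"])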
From 7fee9c564bb6fab9fe7b5ea298769144e36a7541 Mon Sep 17 00:00:00 2001
From: Rishabh Thakkar <rishabh.thakkar at gmail.com>
Date: Tue, 5 Nov 2019 22:32:57 -0600
Subject: [PATCH 5/9] test: Added tests for implemented functions
Signed-off-by: Rishabh Thakkar <rishabh.thakkar at gmail.com>
---
lxd/device/disk.go | 3 +-
test/suites/container_devices_disk.sh | 66 +++++++++++++++++++++++++++
2 files changed, 67 insertions(+), 2 deletions(-)
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index e2586c519a..dbd02bdb08 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -511,7 +511,6 @@ func (d *disk) createDevice() (string, error) {
isFile := false
if d.config["pool"] == "" {
isFile = !shared.IsDir(srcPath) && !IsBlockdev(srcPath)
- //handle ceph case (anusha)
if strings.HasPrefix(d.config["source"], "cephfs") {
//filesystem mount
fields := strings.SplitN(d.config["source"], ":", 2)
@@ -684,7 +683,7 @@ func (d *disk) postStop() error {
return err
}
}
- if d.config["source"] == "ceph" {
+ if strings.HasPrefix(d.config["source"], "ceph") {
//unmap rbd storage from path
//get the map with v := d.volatileGet
//get the actual path with v[ceph_rbd_src_path]
diff --git a/test/suites/container_devices_disk.sh b/test/suites/container_devices_disk.sh
index 91ef518b0f..cf27ba6d2e 100644
--- a/test/suites/container_devices_disk.sh
+++ b/test/suites/container_devices_disk.sh
@@ -59,3 +59,69 @@ test_container_devices_disk_shift() {
lxc storage volume delete "${POOL}" foo-shift
lxc stop foo -f
}
+#- Add a new "test_container_devices_disk_ceph" function
+#- Get storage backend with: lxd_backend=$(storage_backend "$LXD_DIR")
+#- If lxd_backend isn't ceph, return from the function
+#- If it is ceph, then create a temporary rbd pool name, something like "lxdtest-$(basename "${LXD_DIR}")-disk" would do the trick
+#- Create a pool with "ceph osd pool create $RBD_POOL_NAME 1"
+#- Create the rbd volume with "rbd create --pool $RBD_POOL_NAME blah 50MB"
+#- Map the rbd volume with "rbd map --pool $RBD_POOL_NAME blah"
+#- Create a filesystem on it with "mkfs.ext4"
+#- Unmap the volume with "rbd unmap /dev/rbdX"
+#- Create a privileged container (easiest) with "lxc launch testimage ceph-disk -c security.privileged=true"
+#- Attach the volume to the container with "lxc config device add ceph-disk rbd disk source=ceph:$RBD_POOL_NAME/blah ceph.user_name=admin ceph.cluster_name=ceph path=/ceph"
+#- Confirm that it's visible in the container with something like "lxc exec ceph-disk -- stat /ceph/lost+found"
+#- Restart the container to validate it works on startup "lxc restart ceph-disk"
+#- Confirm that it's visible in the container with something like "lxc exec ceph-disk -- stat /ceph/lost+found"
+#- Delete the container "lxc delete -f ceph-disk"
+#- Add a new "test_container_devices_disk_cephfs" function
+#- Get storage backend with: lxd_backend=$(storage_backend "$LXD_DIR")
+#- If lxd_backend isn't ceph, return from the function
+#- If LXD_CEPH_CEPHFS is empty, return from the function
+#- Create a privileged container (easiest) with "lxc launch testimage ceph-fs -c security.privileged=true"
+#- Attach the volume to the container with "lxc config device add ceph-fs fs disk source=cephfs:$LXD_CEPH_CEPHFS/ ceph.user_name=admin ceph.cluster_name=ceph path=/cephfs"
+#- Confirm that it's visible in the container with something like "lxc exec cephfs-disk -- stat /cephfs"
+#- Restart the container to validate it works on startup "lxc restart cephfs-disk"
+#- Confirm that it's visible in the container with something like "lxc exec cephfs-disk -- stat /cephfs"
+#- Delete the container "lxc delete -f cephfs-disk"
+#- Add both functions to test_container_devices_disk
+
+test_container_devices_disk_ceph() {
+ local LXD_BACKEND
+
+ LXD_BACKEND=$(storage_backend "$LXD_DIR")
+ if ! [ "${LXD_BACKEND}" = "ceph" ]; then
+ return
+ fi
+ RBD_POOL_NAME=lxdtest-$(basename "${LXD_DIR}")-disk
+ ceph osd pool create $RBD_POOL_NAME 1
+ rbd create --pool $RBD_POOL_NAME --size 50MB my-volume
+ rbd map --pool $RBD_POOL_NAME my-volume
+ RBD_POOL_PATH="/dev/rbd/${RBD_POOL_NAME}/my-volume"
+ mkfs.ext4 -m0 $RBD_POOL_PATH
+ rbd unmap $RBD_POOL_PATH
+ lxc launch testimage ceph-disk -c security.privileged=true
+ lxc config device add ceph-disk rbd disk source=ceph:$RBD_POOL_NAME/my-volume ceph.user_name=admin ceph.cluster_name=ceph path=/ceph
+ lxc exec ceph-disk -- stat /ceph/lost+found
+ lxc restart ceph-disk
+ lxc exec ceph-disk -- stat /ceph/lost+found
+ lxc delete -f ceph-disk
+}
+
+test_container_devices_disk_cephfs() {
+ local LXD_BACKEND
+
+ LXD_BACKEND=$(storage_backend "$LXD_DIR")
+ if ! [ "${LXD_BACKEND}" = "ceph" ] || [ -z "${LXD_CEPH_CEPHFS:-}" ]; then
+ return
+ fi
+# ceph osd pool create cephfs_data
+# ceph osd pool create cephfs_metadata
+# ceph fs new $LXD_CEPH_CEPHFS cephfs_metadata cephfs_data
+ lxc launch testimage ceph-fs -c security.privileged=true
+ lxc config device add ceph-fs fs disk source=cephfs:$LXD_CEPH_CEPHFS/ ceph.user_name=admin ceph.cluster_name=ceph path=/cephfs
+ lxc exec cephfs-disk -- stat /cephfs
+ lxc restart cephfs-disk
+ lxc exec cephfs-disk -- stat /cephfs
+ lxc delete -f cephfs-disk
+}
\ No newline at end of file
From f7e98a3b7a262536153359ce39dd165b101e8703 Mon Sep 17 00:00:00 2001
From: Rishabh Thakkar <rishabh.thakkar at gmail.com>
Date: Mon, 11 Nov 2019 09:47:41 -0600
Subject: [PATCH 6/9] test: Added calls to tests for implemented functions
Signed-off-by: Rishabh Thakkar <rishabh.thakkar at gmail.com>
---
test/suites/container_devices_disk.sh | 2 ++
1 file changed, 2 insertions(+)
diff --git a/test/suites/container_devices_disk.sh b/test/suites/container_devices_disk.sh
index cf27ba6d2e..9d7a2650dd 100644
--- a/test/suites/container_devices_disk.sh
+++ b/test/suites/container_devices_disk.sh
@@ -5,6 +5,8 @@ test_container_devices_disk() {
lxc launch testimage foo
test_container_devices_disk_shift
+ test_container_devices_disk_ceph
+ test_container_devices_disk_cephfs
lxc delete -f foo
}
From a477fbb589705b1551ecb7d16155364dd21c0f89 Mon Sep 17 00:00:00 2001
From: Rishabh Thakkar <rishabh.thakkar at gmail.com>
Date: Mon, 11 Nov 2019 09:56:36 -0600
Subject: [PATCH 7/9] test: Updated test script to correct image names
Signed-off-by: Rishabh Thakkar <rishabh.thakkar at gmail.com>
---
test/suites/container_devices_disk.sh | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/test/suites/container_devices_disk.sh b/test/suites/container_devices_disk.sh
index 9d7a2650dd..ab86e86438 100644
--- a/test/suites/container_devices_disk.sh
+++ b/test/suites/container_devices_disk.sh
@@ -122,8 +122,8 @@ test_container_devices_disk_cephfs() {
# ceph fs new $LXD_CEPH_CEPHFS cephfs_metadata cephfs_data
lxc launch testimage ceph-fs -c security.privileged=true
lxc config device add ceph-fs fs disk source=cephfs:$LXD_CEPH_CEPHFS/ ceph.user_name=admin ceph.cluster_name=ceph path=/cephfs
- lxc exec cephfs-disk -- stat /cephfs
- lxc restart cephfs-disk
- lxc exec cephfs-disk -- stat /cephfs
- lxc delete -f cephfs-disk
+ lxc exec ceph-fs -- stat /cephfs
+ lxc restart ceph-fs
+ lxc exec ceph-fs -- stat /cephfs
+ lxc delete -f ceph-fs
}
\ No newline at end of file
From 2a309ee4257088621e87810845365a7ec88bfc7e Mon Sep 17 00:00:00 2001
From: Rishabh Thakkar <rishabh.thakkar at gmail.com>
Date: Mon, 11 Nov 2019 13:17:25 -0600
Subject: [PATCH 8/9] lxd: Added exceptions to device validation for cephfs/rbd
Signed-off-by: Rishabh Thakkar <rishabh.thakkar at gmail.com>
---
lxd/device/config/devices.go | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/lxd/device/config/devices.go b/lxd/device/config/devices.go
index fc91d73218..0398a20015 100644
--- a/lxd/device/config/devices.go
+++ b/lxd/device/config/devices.go
@@ -3,6 +3,7 @@ package config
import (
"fmt"
"sort"
+ "strings"
)
// Device represents a LXD container device
@@ -47,6 +48,10 @@ func (device Device) Validate(rules map[string]func(value string) error) error {
continue
}
+ if (k == "ceph.cluster_name" || k == "ceph.user_name") && (strings.HasPrefix(device["source"], "ceph")) {
+ continue
+ }
+
return fmt.Errorf("Invalid device option: %s", k)
}
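To illustrate, a disk device definition that this exception is meant to let through validation (a sketch only; the key names come from this series, everything else is illustrative):

  device := Device{
          "type":              "disk",
          "path":              "/ceph",
          "source":            "ceph:my-pool/my-volume",
          "ceph.user_name":    "admin",
          "ceph.cluster_name": "ceph",
  }
  // "ceph.user_name" and "ceph.cluster_name" are no longer rejected as
  // unknown options when the source starts with "ceph" (which also
  // matches "cephfs" sources).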
From c5a60a145b211d7bafb6df7e8bc78cd4c4d9f207 Mon Sep 17 00:00:00 2001
From: Rishabh Thakkar <rishabh.thakkar at gmail.com>
Date: Tue, 12 Nov 2019 15:33:57 -0600
Subject: [PATCH 9/9] test: remove comments
Signed-off-by: Rishabh Thakkar <rishabh.thakkar at gmail.com>
---
test/suites/container_devices_disk.sh | 26 --------------------------
1 file changed, 26 deletions(-)
diff --git a/test/suites/container_devices_disk.sh b/test/suites/container_devices_disk.sh
index ab86e86438..695a54a794 100644
--- a/test/suites/container_devices_disk.sh
+++ b/test/suites/container_devices_disk.sh
@@ -61,32 +61,6 @@ test_container_devices_disk_shift() {
lxc storage volume delete "${POOL}" foo-shift
lxc stop foo -f
}
-#- Add a new "test_container_devices_disk_ceph" function
-#- Get storage backend with: lxd_backend=$(storage_backend "$LXD_DIR")
-#- If lxd_backend isn't ceph, return from the function
-#- If it is ceph, then create a temporary rbd pool name, something like "lxdtest-$(basename "${LXD_DIR}")-disk" would do the trick
-#- Create a pool with "ceph osd pool create $RBD_POOL_NAME 1"
-#- Create the rbd volume with "rbd create --pool $RBD_POOL_NAME blah 50MB"
-#- Map the rbd volume with "rbd map --pool $RBD_POOL_NAME blah"
-#- Create a filesystem on it with "mkfs.ext4"
-#- Unmap the volume with "rbd unmap /dev/rbdX"
-#- Create a privileged container (easiest) with "lxc launch testimage ceph-disk -c security.privileged=true"
-#- Attach the volume to the container with "lxc config device add ceph-disk rbd disk source=ceph:$RBD_POOL_NAME/blah ceph.user_name=admin ceph.cluster_name=ceph path=/ceph"
-#- Confirm that it's visible in the container with something like "lxc exec ceph-disk -- stat /ceph/lost+found"
-#- Restart the container to validate it works on startup "lxc restart ceph-disk"
-#- Confirm that it's visible in the container with something like "lxc exec ceph-disk -- stat /ceph/lost+found"
-#- Delete the container "lxc delete -f ceph-disk"
-#- Add a new "test_container_devices_disk_cephfs" function
-#- Get storage backend with: lxd_backend=$(storage_backend "$LXD_DIR")
-#- If lxd_backend isn't ceph, return from the function
-#- If LXD_CEPH_CEPHFS is empty, return from the function
-#- Create a privileged container (easiest) with "lxc launch testimage ceph-fs -c security.privileged=true"
-#- Attach the volume to the container with "lxc config device add ceph-fs fs disk source=cephfs:$LXD_CEPH_CEPHFS/ ceph.user_name=admin ceph.cluster_name=ceph path=/cephfs"
-#- Confirm that it's visible in the container with something like "lxc exec cephfs-disk -- stat /cephfs"
-#- Restart the container to validate it works on startup "lxc restart cephfs-disk"
-#- Confirm that it's visible in the container with something like "lxc exec cephfs-disk -- stat /cephfs"
-#- Delete the container "lxc delete -f cephfs-disk"
-#- Add both functions to test_container_devices_disk
test_container_devices_disk_ceph() {
local LXD_BACKEND