[lxc-devel] [lxd/stable-3.0] Backport ceph storage volume availability check

freeekanayaka on Github lxc-bot at linuxcontainers.org
Thu Nov 22 12:55:46 UTC 2018


From 474f30acc9402df129cf1d6a8e4392af417f301f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 22 Nov 2018 13:47:24 +0100
Subject: [PATCH 1/3] Add StorageVolumeIsAvailable to check if a Ceph volume
 can be attached

Backport of 7146e447367e6ca867f22b75efa77ceff4ef1103

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/containers.go        |  54 +++++++
 lxd/db/containers.mapper.go | 298 ++++++++++++++++++++++++++++++++++++
 lxd/db/profiles.go          |  76 +++++++++
 lxd/db/profiles.mapper.go   | 212 +++++++++++++++++++++++++
 lxd/db/storage_volumes.go   |  65 ++++++++
 5 files changed, 705 insertions(+)
 create mode 100644 lxd/db/containers.mapper.go
 create mode 100644 lxd/db/profiles.mapper.go

diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 0751e5cba9..5c68953214 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -9,6 +9,7 @@ import (
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/lxd/types"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/pkg/errors"
 
@@ -1158,3 +1159,56 @@ WHERE storage_volumes.node_id=? AND storage_volumes.name=? AND storage_volumes.t
 
 	return poolName, nil
 }
+
+// Note: the code below was backported from the 3.x master branch, and it's
+//       mostly cut and paste from there to avoid re-inventing that logic.
+
+// Container is a value object holding db-related details about a container.
+type Container struct {
+	ID           int
+	Name         string `db:"primary=yes"`
+	Node         string `db:"join=nodes.name"`
+	Type         int
+	Architecture int
+	Ephemeral    bool
+	CreationDate time.Time
+	Stateful     bool
+	LastUseDate  time.Time
+	Description  string `db:"coalesce=''"`
+	Config       map[string]string
+	Devices      map[string]map[string]string
+	Profiles     []string
+}
+
+// ContainerListExpanded loads all containers and expands their config and
+// devices using the profiles they are associated with.
+func (c *ClusterTx) ContainerListExpanded() ([]Container, error) {
+	containers, err := c.ContainerList()
+	if err != nil {
+		return nil, errors.Wrap(err, "Load containers")
+	}
+
+	profiles, err := c.ProfileList()
+	if err != nil {
+		return nil, errors.Wrap(err, "Load profiles")
+	}
+
+	// Index of all profiles by name.
+	profilesByName := map[string]Profile{}
+	for _, profile := range profiles {
+		profilesByName[profile.Name] = profile
+	}
+
+	for i, container := range containers {
+		profiles := make([]api.Profile, len(container.Profiles))
+		for j, name := range container.Profiles {
+			profile := profilesByName[name]
+			profiles[j] = *ProfileToAPI(&profile)
+		}
+
+		containers[i].Config = ProfilesExpandConfig(container.Config, profiles)
+		containers[i].Devices = ProfilesExpandDevices(container.Devices, profiles)
+	}
+
+	return containers, nil
+}
diff --git a/lxd/db/containers.mapper.go b/lxd/db/containers.mapper.go
new file mode 100644
index 0000000000..02893228a8
--- /dev/null
+++ b/lxd/db/containers.mapper.go
@@ -0,0 +1,298 @@
+// Note: the code below was backported from the 3.x master branch, and it's
+//       mostly cut and paste from there to avoid re-inventing that logic.
+
+package db
+
+// The code below was generated by lxd-generate - DO NOT EDIT!
+
+import (
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/pkg/errors"
+)
+
+var _ = api.ServerEnvironment{}
+
+var containerObjects = cluster.RegisterStmt(`
+SELECT containers.id, containers.name, nodes.name AS node, containers.type, containers.architecture, containers.ephemeral, containers.creation_date, containers.stateful, containers.last_use_date, coalesce(containers.description, '')
+  FROM containers JOIN nodes ON node_id = nodes.id
+  ORDER BY containers.name
+`)
+
+var containerProfilesRef = cluster.RegisterStmt(`
+SELECT nodes.name,
+       containers.name,
+       profiles.name
+FROM containers_profiles
+JOIN containers ON containers.id=containers_profiles.container_id
+JOIN profiles ON profiles.id=containers_profiles.profile_id
+JOIN nodes ON nodes.id=containers.node_id
+ORDER BY containers_profiles.apply_order;
+`)
+
+var containerConfigRef = cluster.RegisterStmt(`
+SELECT nodes.name,
+       containers.name,
+       containers_config.key,
+       containers_config.value
+FROM containers_config
+JOIN containers ON containers.id=containers_config.container_id
+JOIN nodes ON nodes.id=containers.node_id
+`)
+
+var containerDevicesRef = cluster.RegisterStmt(`
+SELECT nodes.name,
+       containers.name,
+       containers_devices.name,
+       containers_devices.type,
+       coalesce(containers_devices_config.key, ''),
+       coalesce(containers_devices_config.value, '')
+FROM containers_devices
+LEFT OUTER JOIN containers_devices_config
+  ON containers_devices_config.container_device_id=containers_devices.id
+JOIN containers ON containers.id=containers_devices.container_id
+JOIN nodes ON nodes.id=containers.node_id
+`)
+
+// ContainerList returns all available containers.
+func (c *ClusterTx) ContainerList() ([]Container, error) {
+	// Result slice.
+	objects := make([]Container, 0)
+
+	stmt := c.stmt(containerObjects)
+	args := []interface{}{}
+
+	// Dest function for scanning a row.
+	dest := func(i int) []interface{} {
+		objects = append(objects, Container{})
+		return []interface{}{
+			&objects[i].ID,
+			&objects[i].Name,
+			&objects[i].Node,
+			&objects[i].Type,
+			&objects[i].Architecture,
+			&objects[i].Ephemeral,
+			&objects[i].CreationDate,
+			&objects[i].Stateful,
+			&objects[i].LastUseDate,
+			&objects[i].Description,
+		}
+	}
+
+	// Select.
+	err := query.SelectObjects(stmt, dest, args...)
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch containers")
+	}
+
+	// Fill field Config.
+	configObjects, err := c.ContainerConfigRef()
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch field Config")
+	}
+
+	for i := range objects {
+		value := configObjects[objects[i].Name]
+		if value == nil {
+			value = map[string]string{}
+		}
+		objects[i].Config = value
+	}
+
+	// Fill field Devices.
+	devicesObjects, err := c.ContainerDevicesRef()
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch field Devices")
+	}
+
+	for i := range objects {
+		value := devicesObjects[objects[i].Name]
+		if value == nil {
+			value = map[string]map[string]string{}
+		}
+		objects[i].Devices = value
+	}
+
+	// Fill field Profiles.
+	profilesObjects, err := c.ContainerProfilesRef()
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch field Profiles")
+	}
+
+	for i := range objects {
+		value := profilesObjects[objects[i].Name]
+		if value == nil {
+			value = []string{}
+		}
+		objects[i].Profiles = value
+	}
+
+	return objects, nil
+}
+
+// ContainerProfilesRef returns the profile names associated with each container.
+func (c *ClusterTx) ContainerProfilesRef() (map[string][]string, error) {
+	// Result slice.
+	objects := make([]struct {
+		Name  string
+		Value string
+	}, 0)
+
+	stmt := c.stmt(containerProfilesRef)
+	args := []interface{}{}
+
+	// Dest function for scanning a row.
+	dest := func(i int) []interface{} {
+		objects = append(objects, struct {
+			Name  string
+			Value string
+		}{})
+		return []interface{}{
+			&objects[i].Name,
+			&objects[i].Value,
+		}
+	}
+
+	// Select.
+	err := query.SelectObjects(stmt, dest, args...)
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch string ref for containers")
+	}
+
+	// Build index by primary name.
+	index := map[string][]string{}
+
+	for _, object := range objects {
+		item, ok := index[object.Name]
+		if !ok {
+			item = []string{}
+		}
+
+		index[object.Name] = append(item, object.Value)
+	}
+
+	return index, nil
+}
+
+// ContainerConfigRef returns the config of each container, indexed by container name.
+func (c *ClusterTx) ContainerConfigRef() (map[string]map[string]string, error) {
+	// Result slice.
+	objects := make([]struct {
+		Name  string
+		Key   string
+		Value string
+	}, 0)
+
+	stmt := c.stmt(containerConfigRef)
+	args := []interface{}{}
+
+	// Dest function for scanning a row.
+	dest := func(i int) []interface{} {
+		objects = append(objects, struct {
+			Name  string
+			Key   string
+			Value string
+		}{})
+		return []interface{}{
+			&objects[i].Name,
+			&objects[i].Key,
+			&objects[i].Value,
+		}
+	}
+
+	// Select.
+	err := query.SelectObjects(stmt, dest, args...)
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch config ref for containers")
+	}
+
+	// Build index by primary name.
+	index := map[string]map[string]string{}
+
+	for _, object := range objects {
+		item, ok := index[object.Name]
+		if !ok {
+			item = map[string]string{}
+		}
+
+		index[object.Name] = item
+		item[object.Key] = object.Value
+	}
+
+	return index, nil
+}
+
+// ContainerDevicesRef returns the devices of each container, indexed by container name.
+func (c *ClusterTx) ContainerDevicesRef() (map[string]map[string]map[string]string, error) {
+	// Result slice.
+	objects := make([]struct {
+		Name   string
+		Device string
+		Type   int
+		Key    string
+		Value  string
+	}, 0)
+
+	stmt := c.stmt(containerDevicesRef)
+	args := []interface{}{}
+
+	// Dest function for scanning a row.
+	dest := func(i int) []interface{} {
+		objects = append(objects, struct {
+			Name   string
+			Device string
+			Type   int
+			Key    string
+			Value  string
+		}{})
+		return []interface{}{
+			&objects[i].Name,
+			&objects[i].Device,
+			&objects[i].Type,
+			&objects[i].Key,
+			&objects[i].Value,
+		}
+	}
+
+	// Select.
+	err := query.SelectObjects(stmt, dest, args...)
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch device ref for containers")
+	}
+
+	// Build index by primary name.
+	index := map[string]map[string]map[string]string{}
+
+	for _, object := range objects {
+		item, ok := index[object.Name]
+		if !ok {
+			item = map[string]map[string]string{}
+		}
+
+		index[object.Name] = item
+		config, ok := item[object.Device]
+		if !ok {
+			// First time we see this device: initialize its
+			// config map and record the type.
+			deviceType, err := dbDeviceTypeToString(object.Type)
+			if err != nil {
+				return nil, errors.Wrapf(
+					err, "unexpected device type code '%d'", object.Type)
+			}
+			config = map[string]string{}
+			config["type"] = deviceType
+			item[object.Device] = config
+		}
+		if object.Key != "" {
+			config[object.Key] = object.Value
+		}
+	}
+
+	return index, nil
+}
diff --git a/lxd/db/profiles.go b/lxd/db/profiles.go
index b990b0b06f..0c7d5f7d43 100644
--- a/lxd/db/profiles.go
+++ b/lxd/db/profiles.go
@@ -270,3 +270,79 @@ DELETE FROM profiles_devices_config WHERE profile_device_id NOT IN (SELECT id FR
 
 	return nil
 }
+
+// Note: the code below was backported from the 3.x master branch, and it's
+//       mostly cut and paste from there to avoid re-inventing that logic.
+
+// Profile is a value object holding db-related details about a profile.
+type Profile struct {
+	ID          int
+	Name        string `db:"primary=yes"`
+	Description string `db:"coalesce=''"`
+	Config      map[string]string
+	Devices     map[string]map[string]string
+	UsedBy      []string
+}
+
+// ProfileToAPI is a convenience to convert a Profile db struct into
+// an API profile struct.
+func ProfileToAPI(profile *Profile) *api.Profile {
+	p := &api.Profile{
+		Name:   profile.Name,
+		UsedBy: profile.UsedBy,
+	}
+	p.Description = profile.Description
+	p.Config = profile.Config
+	p.Devices = profile.Devices
+
+	return p
+}
+
+// ProfilesExpandConfig expands the given container config with the config
+// values of the given profiles.
+func ProfilesExpandConfig(config map[string]string, profiles []api.Profile) map[string]string {
+	expandedConfig := map[string]string{}
+
+	// Apply all the profiles
+	profileConfigs := make([]map[string]string, len(profiles))
+	for i, profile := range profiles {
+		profileConfigs[i] = profile.Config
+	}
+
+	for i := range profileConfigs {
+		for k, v := range profileConfigs[i] {
+			expandedConfig[k] = v
+		}
+	}
+
+	// Stick the given config on top
+	for k, v := range config {
+		expandedConfig[k] = v
+	}
+
+	return expandedConfig
+}
+
+// ProfilesExpandDevices expands the given container devices with the devices
+// defined in the given profiles.
+func ProfilesExpandDevices(devices types.Devices, profiles []api.Profile) types.Devices {
+	expandedDevices := types.Devices{}
+
+	// Apply all the profiles
+	profileDevices := make([]types.Devices, len(profiles))
+	for i, profile := range profiles {
+		profileDevices[i] = profile.Devices
+	}
+	for i := range profileDevices {
+		for k, v := range profileDevices[i] {
+			expandedDevices[k] = v
+		}
+	}
+
+	// Stick the given devices on top
+	for k, v := range devices {
+		expandedDevices[k] = v
+	}
+
+	return expandedDevices
+}
diff --git a/lxd/db/profiles.mapper.go b/lxd/db/profiles.mapper.go
new file mode 100644
index 0000000000..3f3e3f2af9
--- /dev/null
+++ b/lxd/db/profiles.mapper.go
@@ -0,0 +1,212 @@
+// Note: the code below was backported from the 3.x master branch, and it's
+//       mostly cut and paste from there to avoid re-inventing that logic.
+
+package db
+
+// The code below was generated by lxd-generate - DO NOT EDIT!
+
+import (
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/pkg/errors"
+)
+
+var _ = api.ServerEnvironment{}
+
+var profileObjects = cluster.RegisterStmt(`
+SELECT profiles.id, profiles.name, coalesce(profiles.description, '')
+  FROM profiles ORDER BY profiles.name
+`)
+
+var profileConfigRef = cluster.RegisterStmt(`
+SELECT profiles.name,
+       profiles_config.key,
+       profiles_config.value
+FROM profiles_config
+JOIN profiles ON profiles.id=profiles_config.profile_id
+`)
+
+var profileDevicesRef = cluster.RegisterStmt(`
+SELECT profiles.name,
+       profiles_devices.name,
+       profiles_devices.type,
+       coalesce(profiles_devices_config.key, ''),
+       coalesce(profiles_devices_config.value, '')
+FROM profiles_devices
+LEFT OUTER JOIN profiles_devices_config
+   ON profiles_devices_config.profile_device_id=profiles_devices.id
+ JOIN profiles ON profiles.id=profiles_devices.profile_id
+`)
+
+// ProfileList returns all available profiles.
+func (c *ClusterTx) ProfileList() ([]Profile, error) {
+	// Result slice.
+	objects := make([]Profile, 0)
+
+	stmt := c.stmt(profileObjects)
+	args := []interface{}{}
+
+	// Dest function for scanning a row.
+	dest := func(i int) []interface{} {
+		objects = append(objects, Profile{})
+		return []interface{}{
+			&objects[i].ID,
+			&objects[i].Name,
+			&objects[i].Description,
+		}
+	}
+
+	// Select.
+	err := query.SelectObjects(stmt, dest, args...)
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch profiles")
+	}
+
+	// Fill field Config.
+	configObjects, err := c.ProfileConfigRef()
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch field Config")
+	}
+
+	for i := range objects {
+		value := configObjects[objects[i].Name]
+		if value == nil {
+			value = map[string]string{}
+		}
+		objects[i].Config = value
+	}
+
+	// Fill field Devices.
+	devicesObjects, err := c.ProfileDevicesRef()
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch field Devices")
+	}
+
+	for i := range objects {
+		value := devicesObjects[objects[i].Name]
+		if value == nil {
+			value = map[string]map[string]string{}
+		}
+		objects[i].Devices = value
+	}
+
+	return objects, nil
+}
+
+// ProfileConfigRef returns the config of each profile, indexed by profile name.
+func (c *ClusterTx) ProfileConfigRef() (map[string]map[string]string, error) {
+	// Result slice.
+	objects := make([]struct {
+		Name  string
+		Key   string
+		Value string
+	}, 0)
+
+	// Pick the prepared statement and arguments to use based on active criteria.
+	stmt := c.stmt(profileConfigRef)
+	args := []interface{}{}
+
+	// Dest function for scanning a row.
+	dest := func(i int) []interface{} {
+		objects = append(objects, struct {
+			Name  string
+			Key   string
+			Value string
+		}{})
+		return []interface{}{
+			&objects[i].Name,
+			&objects[i].Key,
+			&objects[i].Value,
+		}
+	}
+
+	// Select.
+	err := query.SelectObjects(stmt, dest, args...)
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch config ref for profiles")
+	}
+
+	// Build index by primary name.
+	index := map[string]map[string]string{}
+
+	for _, object := range objects {
+		item, ok := index[object.Name]
+		if !ok {
+			item = map[string]string{}
+		}
+
+		index[object.Name] = item
+		item[object.Key] = object.Value
+	}
+
+	return index, nil
+}
+
+// ProfileDevicesRef returns the devices of each profile, indexed by profile name.
+func (c *ClusterTx) ProfileDevicesRef() (map[string]map[string]map[string]string, error) {
+	// Result slice.
+	objects := make([]struct {
+		Name   string
+		Device string
+		Type   int
+		Key    string
+		Value  string
+	}, 0)
+
+	stmt := c.stmt(profileDevicesRef)
+	args := []interface{}{}
+
+	// Dest function for scanning a row.
+	dest := func(i int) []interface{} {
+		objects = append(objects, struct {
+			Name   string
+			Device string
+			Type   int
+			Key    string
+			Value  string
+		}{})
+		return []interface{}{
+			&objects[i].Name,
+			&objects[i].Device,
+			&objects[i].Type,
+			&objects[i].Key,
+			&objects[i].Value,
+		}
+	}
+
+	// Select.
+	err := query.SelectObjects(stmt, dest, args...)
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to fetch device ref for profiles")
+	}
+
+	// Build index by primary name.
+	index := map[string]map[string]map[string]string{}
+
+	for _, object := range objects {
+		item, ok := index[object.Name]
+		if !ok {
+			item = map[string]map[string]string{}
+		}
+
+		index[object.Name] = item
+		config, ok := item[object.Device]
+		if !ok {
+			// First time we see this device: initialize its
+			// config map and record the type.
+			deviceType, err := dbDeviceTypeToString(object.Type)
+			if err != nil {
+				return nil, errors.Wrapf(
+					err, "unexpected device type code '%d'", object.Type)
+			}
+			config = map[string]string{}
+			config["type"] = deviceType
+			item[object.Device] = config
+		}
+		if object.Key != "" {
+			config[object.Key] = object.Value
+		}
+	}
+
+	return index, nil
+}
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index ab670b842a..fe6bc9654c 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -6,6 +6,7 @@ import (
 	"sort"
 
 	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/pkg/errors"
 )
 
 // StorageVolumeNodeAddresses returns the addresses of all nodes on which the
@@ -206,3 +207,67 @@ func (c *Cluster) StorageVolumeMoveToLVMThinPoolNameKey() error {
 
 	return nil
 }
+
+// StorageVolumeIsAvailable checks whether a custom volume is available to be
+// attached.
+//
+// It always returns true for non-Ceph volumes.
+//
+// For Ceph volumes, it returns true if the volume is either not attached to
+// any container, or only attached to containers on this node.
+func (c *Cluster) StorageVolumeIsAvailable(pool, volume string) (bool, error) {
+	isAvailable := false
+
+	err := c.Transaction(func(tx *ClusterTx) error {
+		id, err := tx.StoragePoolID(pool)
+		if err != nil {
+			return errors.Wrapf(err, "Fetch storage pool ID for %q", pool)
+		}
+
+		driver, err := tx.StoragePoolDriver(id)
+		if err != nil {
+			return errors.Wrapf(err, "Fetch storage pool driver for %q", pool)
+		}
+
+		if driver != "ceph" {
+			isAvailable = true
+			return nil
+		}
+
+		node, err := tx.NodeName()
+		if err != nil {
+			return errors.Wrap(err, "Fetch node name")
+		}
+
+		containers, err := tx.ContainerListExpanded()
+		if err != nil {
+			return errors.Wrap(err, "Fetch containers")
+		}
+
+		for _, container := range containers {
+			for _, device := range container.Devices {
+				if device["type"] != "disk" {
+					continue
+				}
+				if device["pool"] != pool {
+					continue
+				}
+				if device["source"] != volume {
+					continue
+				}
+				if container.Node != node {
+					// This ceph volume is already attached
+					// to a container on a different node.
+					return nil
+				}
+			}
+		}
+		isAvailable = true
+
+		return nil
+	})
+	if err != nil {
+		return false, err
+	}
+
+	return isAvailable, nil
+}
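
For reviewers, a note on the expansion semantics backported above: profile
config values are applied in list order (later profiles override earlier
ones), and the container's own config is applied last, on top. A minimal
sketch of that precedence, with made-up profile values that are not part of
the patch:

    package main

    import (
        "fmt"

        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/shared/api"
    )

    func main() {
        // Illustration only: two profiles plus container-local config.
        p1 := api.Profile{}
        p1.Config = map[string]string{"limits.cpu": "2"}

        p2 := api.Profile{}
        p2.Config = map[string]string{"limits.cpu": "4", "limits.memory": "1GB"}

        local := map[string]string{"limits.cpu": "8"}

        expanded := db.ProfilesExpandConfig(local, []api.Profile{p1, p2})
        fmt.Println(expanded["limits.cpu"])    // "8": the container config wins
        fmt.Println(expanded["limits.memory"]) // "1GB": inherited from the second profile
    }

ProfilesExpandDevices applies the same precedence to devices, which is what
lets StorageVolumeIsAvailable see disk devices that are only defined in a
profile.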

From 2b7cebdfb6d5b2ac755364a77f3842709e16c47c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 22 Nov 2018 13:50:25 +0100
Subject: [PATCH 2/3] Wire StorageVolumeIsAvailable to containerValidDevices

Backport of a6bd52e5432006ae2cdfbf70154e6ccbde548e20

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container.go | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 2e17d54bf4..1c9d565501 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -311,7 +311,7 @@ func containerValidConfig(sysOS *sys.OS, config map[string]string, profile bool,
 	return nil
 }
 
-func containerValidDevices(db *db.Cluster, devices types.Devices, profile bool, expanded bool) error {
+func containerValidDevices(cluster *db.Cluster, devices types.Devices, profile bool, expanded bool) error {
 	// Empty device list
 	if devices == nil {
 		return nil
@@ -390,10 +390,23 @@ func containerValidDevices(db *db.Cluster, devices types.Devices, profile bool,
 					return fmt.Errorf("Storage volumes cannot be specified as absolute paths")
 				}
 
-				_, err := db.StoragePoolGetID(m["pool"])
+				_, err := cluster.StoragePoolGetID(m["pool"])
 				if err != nil {
 					return fmt.Errorf("The \"%s\" storage pool doesn't exist", m["pool"])
 				}
+
+				if !profile && expanded && m["source"] != "" && m["path"] != "/" {
+					isAvailable, err := cluster.StorageVolumeIsAvailable(
+						m["pool"], m["source"])
+					if err != nil {
+						return errors.Wrap(err, "Check if volume is available")
+					}
+					if !isAvailable {
+						return fmt.Errorf(
+							"Storage volume %q is already attached to a container "+
+								"on a different node", m["source"])
+					}
+				}
 			}
 
 		} else if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) {
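
To make the wiring concrete, the new check boils down to the following call
pattern (a sketch only, not part of the patch; "cluster" is the *db.Cluster
handle that containerValidDevices receives, and the pool and volume names
are illustrative):

    // Refuse the attach when the Ceph volume is held by a container
    // on another node.
    isAvailable, err := cluster.StorageVolumeIsAvailable("pool1", "v1")
    if err != nil {
        return errors.Wrap(err, "Check if volume is available")
    }
    if !isAvailable {
        return fmt.Errorf("Storage volume %q is already attached to a container on a different node", "v1")
    }

Note that the check is gated on !profile && expanded, so it only fires when
validating the expanded devices of a concrete container, not when a profile
itself is edited.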

From 3a06584978a17fbd27dfcbd9116495415dd8b5b7 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 22 Nov 2018 13:53:12 +0100
Subject: [PATCH 3/3] Add integration test

Backport of 0a6cf157d9df0a59c1111c632bb2fe744654b3b6

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/suites/clustering.sh | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 9a9c1ba5f5..acaa4d993b 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -504,6 +504,22 @@ test_clustering_storage() {
     LXD_DIR="${LXD_ONE_DIR}" lxc cluster remove node3 --force
 
     LXD_DIR="${LXD_ONE_DIR}" lxc delete bar
+
+    # Attach a custom volume to a container on node1
+    LXD_DIR="${LXD_ONE_DIR}" lxc storage volume create pool1 v1
+    LXD_DIR="${LXD_ONE_DIR}" lxc init --target node1 -s pool1 testimage baz
+    LXD_DIR="${LXD_ONE_DIR}" lxc storage volume attach pool1 custom/v1 baz testDevice /opt
+
+    # Trying to attach a custom volume to a container on another node fails
+    LXD_DIR="${LXD_TWO_DIR}" lxc init --target node2 -s pool1 testimage buz
+    ! LXD_DIR="${LXD_TWO_DIR}" lxc storage volume attach pool1 custom/v1 buz testDevice /opt
+
+    LXD_DIR="${LXD_ONE_DIR}" lxc storage volume detach pool1 v1 baz
+
+    LXD_DIR="${LXD_ONE_DIR}" lxc storage volume delete pool1 v1
+    LXD_DIR="${LXD_ONE_DIR}" lxc delete baz
+    LXD_DIR="${LXD_ONE_DIR}" lxc delete buz
+
     LXD_DIR="${LXD_ONE_DIR}" lxc image delete testimage
   fi
 

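With the check wired in, the negative test above is expected to fail with
the error message introduced in patch 2; the exact client-side formatting
may differ, but roughly:

    $ lxc storage volume attach pool1 custom/v1 buz testDevice /opt
    Error: Storage volume "v1" is already attached to a container on a different node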
