[lxc-devel] [lxd/master] storage: bugfixes
brauner on Github
lxc-bot at linuxcontainers.org
Wed Feb 22 03:04:01 UTC 2017
A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 301 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20170222/7760aeb5/attachment.bin>
-------------- next part --------------
From 005b5e1235919ad9972f3865476225f238c5d017 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 22 Feb 2017 02:54:14 +0100
Subject: [PATCH 1/6] storage-pools: leave some properties unset
- leave "source" property unset
- leave "lvm.vg_name" unset
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/storage_pools_config.go | 7 -------
1 file changed, 7 deletions(-)
diff --git a/lxd/storage_pools_config.go b/lxd/storage_pools_config.go
index 840ff89..a3931c3 100644
--- a/lxd/storage_pools_config.go
+++ b/lxd/storage_pools_config.go
@@ -50,8 +50,6 @@ func storagePoolValidateConfig(name string, driver string, config map[string]str
if config["source"] == "" {
if driver == "dir" {
config["source"] = filepath.Join(shared.VarPath("storage-pools"), name)
- } else if driver != "lvm" {
- config["source"] = filepath.Join(shared.VarPath("disks"), name)
}
}
@@ -149,11 +147,6 @@ func storagePoolFillDefault(name string, driver string, config map[string]string
}
if driver == "lvm" {
- if config["lvm.vg_name"] == "" {
- // Default is to set this to the pool name if empty.
- config["lvm.vg_name"] = name
- }
-
if config["lvm.thinpool_name"] == "" {
config["lvm.thinpool_name"] = "LXDThinpool"
}
From 8de82306b72d45c5dcf3f7ad471b7c36fef46cd1 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 22 Feb 2017 02:55:22 +0100
Subject: [PATCH 2/6] lvm: unified command layout
lxc storage create pool1 lvm
- disallowed (reserved for future loop file lvm pools)
lxc storage create pool1 lvm source=my-pool
- Use the existing volume group "my-pool"
lxc storage create pool1 lvm source=/dev/sdb
- Create new lvm pool named "pool1".
lxc storage create pool1 lvm source=/dev/sdb lvm.vg_name=random-string
- Create new lvm pool named "random-string".
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/storage_lvm.go | 135 +++++++++++++++++++++++++++--------------------------
1 file changed, 68 insertions(+), 67 deletions(-)
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index dfdfb30..4475b7a 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -238,23 +238,25 @@ func (s *storageLvm) StoragePoolInit(config map[string]interface{}) (storage, er
}
source := s.pool.Config["source"]
- s.vgName = s.pool.Config["lvm.vg_name"]
s.thinPoolName = s.pool.Config["lvm.thinpool_name"]
if source == "" {
- // Source property is empty, so the user wants us to reuse an
- // already existing volume group.
- s.log.Debug(fmt.Sprintf("Source property of this lvm storage pool empty: Checking that volume group \"lvm.vg_name=%s\" already exists.", s.vgName))
- ok, err := storageVGExists(s.vgName)
- if err != nil {
- // Internal error.
- return s, err
- } else if !ok {
- // Volume group does not exist.
- return s, fmt.Errorf("The \"source\" property of the storage pool is empty and the \"lvm.vg_name=%s\" volume group does not exist.", s.vgName)
+ return s, fmt.Errorf("Loop backed lvm storage pools are not supported.")
+ } else {
+ if filepath.IsAbs(source) {
+ if !shared.IsBlockdevPath(source) {
+ return s, fmt.Errorf("Loop backed lvm storage pools are not supported.")
+ }
+ } else {
+ ok, err := storageVGExists(source)
+ if err != nil {
+ // Internal error.
+ return s, err
+ } else if !ok {
+ // Volume group does not exist.
+ return s, fmt.Errorf("The requested volume group \"%s\" does not exist.", source)
+ }
+ s.vgName = s.pool.Config["lvm.vg_name"]
}
- s.log.Debug(fmt.Sprintf("Source property of this lvm storage pool empty: Detected that volume group \"lvm.vg_name=%s\" already exists.", s.vgName))
- } else if filepath.IsAbs(source) && !shared.IsBlockdevPath(source) {
- return s, fmt.Errorf("Loop backed lvm storage volumes are currently not supported.")
}
return s, nil
@@ -309,8 +311,6 @@ func (s *storageLvm) lvmVersionIsAtLeast(versionString string) (bool, error) {
func (s *storageLvm) StoragePoolCreate() error {
tryUndo := true
- source := s.pool.Config["source"]
-
// Create the mountpoint for the storage pool.
poolMntPoint := getStoragePoolMountPoint(s.pool.Name)
err := os.MkdirAll(poolMntPoint, 0711)
@@ -324,65 +324,66 @@ func (s *storageLvm) StoragePoolCreate() error {
}()
poolName := s.getOnDiskPoolName()
- if filepath.IsAbs(source) {
- // Check whether we have been given a block device.
- if !shared.IsBlockdevPath(source) {
- return fmt.Errorf("Loop backed lvm storage volumes are currently not supported.")
- }
+ source := s.pool.Config["source"]
+ if source == "" {
+ return fmt.Errorf("Loop backed lvm storage pools are not supported.")
+ } else {
+ if filepath.IsAbs(source) {
+ if !shared.IsBlockdevPath(source) {
+ return fmt.Errorf("Loop backed lvm storage pools are not supported.")
+ }
- // Check if the physical volume already exists.
- ok, err := storagePVExists(source)
- if err == nil && !ok {
- // Create a new lvm physical volume.
- output, err := exec.Command("pvcreate", source).CombinedOutput()
- if err != nil {
- return fmt.Errorf("Failed to create the physical volume for the lvm storage pool: %s.", output)
+ // Check if the physical volume already exists.
+ ok, err := storagePVExists(source)
+ if err == nil && !ok {
+ // Create a new lvm physical volume.
+ output, err := exec.Command("pvcreate", source).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("Failed to create the physical volume for the lvm storage pool: %s.", output)
+ }
+ defer func() {
+ if tryUndo {
+ exec.Command("pvremove", source).Run()
+ }
+ }()
}
- defer func() {
- if tryUndo {
- exec.Command("pvremove", source).Run()
+
+ // lxc storage create pool1 lvm source=/dev/sdX lvm.vg_name=random-string
+ poolName = s.pool.Config["lvm.vg_name"]
+ if poolName == "" {
+ // lxc storage create pool1 lvm source=/dev/sdX
+ poolName = s.pool.Name
+ }
+
+ // Set source to volume group name.
+ s.pool.Config["source"] = poolName
+
+ // Check if the volume group already exists.
+ ok, err = storageVGExists(poolName)
+ if err == nil && !ok {
+ // Create a volume group on the physical volume.
+ output, err := exec.Command("vgcreate", poolName, source).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("Failed to create the volume group for the lvm storage pool: %s.", output)
}
- }()
- }
+ }
+ s.pool.Config["lvm.vg_name"] = poolName
+ } else {
+ if s.pool.Config["lvm.vg_name"] != "" {
+ // User gave somethin weird.
+ return fmt.Errorf("Invalid request.")
+ }
- ok, err = storageVGExists(poolName)
- if err == nil && !ok {
- // Create a volume group on the physical volume.
- output, err := exec.Command("vgcreate", poolName, source).CombinedOutput()
+ ok, err := storageVGExists(source)
if err != nil {
- return fmt.Errorf("Failed to create the volume group for the lvm storage pool: %s.", output)
+ // Internal error.
+ return err
+ } else if !ok {
+ // Volume group does not exist.
+ return fmt.Errorf("The requested volume group \"%s\" does not exist.", source)
}
- }
- // Set source to volume group name.
- s.pool.Config["source"] = s.pool.Name
- } else {
- vgToCreateOrCheck := ""
- // The user wants us to reuse an already existing volume group.
- if source == "" || source == s.vgName && s.vgName != "" && s.thinPoolName != "" {
- // The user wants us to reuse an already existing volume
- // group.
- vgToCreateOrCheck = s.vgName
- s.pool.Config["source"] = s.vgName
- } else if source != "" {
- vgToCreateOrCheck = source
- // Set lvm.vg_name to the name of the source.
s.pool.Config["lvm.vg_name"] = source
- } else {
- // Shouldn't happen.
- return fmt.Errorf("Invalid LVM pool creation request: source=%s, lvm.vg_name=%s, lvm.thinpool_name=%s.",
- s.pool.Config["source"],
- s.pool.Config["lvm.vg_name"],
- s.pool.Config["lvm.thinpool_name"])
- }
-
- ok, err := storageVGExists(vgToCreateOrCheck)
- if err != nil {
- // Internal error.
- return err
- } else if !ok {
- // Volume group does not exist.
- return fmt.Errorf("The requested volume group \"%s\" does not exist.", vgToCreateOrCheck)
}
}
From dad01066b923ab561e9345827c01d5c45c906a24 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 22 Feb 2017 02:56:59 +0100
Subject: [PATCH 3/6] test: adapt to command line unification
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
test/suites/storage.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test/suites/storage.sh b/test/suites/storage.sh
index 6ef9579..ab247e7 100644
--- a/test/suites/storage.sh
+++ b/test/suites/storage.sh
@@ -73,7 +73,7 @@ test_storage() {
pvcreate "${loop_device_7}"
vgcreate "lxdtest-$(basename "${LXD_DIR}")-pool12-dummy_vg_3" "${loop_device_7}"
# Reuse existing volume group "dummy_vg_3" on existing physical volume.
- lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool12" lvm lvm.vg_name="lxdtest-$(basename "${LXD_DIR}")-pool12-dummy_vg_3"
+ lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool12" lvm source="${loop_device_7}" lvm.vg_name="lxdtest-$(basename "${LXD_DIR}")-pool12-dummy_vg_3"
configure_lvm_loop_device loop_file_8 loop_device_8
# shellcheck disable=SC2154
From 8229806bb31fdee636612a76b8805942f752f303 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 22 Feb 2017 03:40:33 +0100
Subject: [PATCH 4/6] zfs: unify command line
lxc storage create pool1 zfs
- Create zfs pool named "pool1".
lxc storage create pool1 zfs zfs.pool_name=random-string
- Create a loop backed pool named "pool1" with a zpool name of "random-string".
lxc storage create pool1 zfs source=my-pool
- Use the existing pool "my-pool".
lxc storage create pool1 zfs source=my-pool/dataset
- Use the existing dataset "my-pool/dataset".
lxc storage create pool1 zfs source=/dev/somedisk
- Create a new pool called "pool1" on "/dev/somedisk".
lxc storage create pool1 zfs source=/dev/somedisk zfs.pool_name=abc
- Create a new pool called "abc" on "/dev/somedisk".
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/storage_pools_config.go | 6 -----
lxd/storage_zfs.go | 65 +++++++++++++++------------------------------
2 files changed, 22 insertions(+), 49 deletions(-)
diff --git a/lxd/storage_pools_config.go b/lxd/storage_pools_config.go
index a3931c3..d449b2a 100644
--- a/lxd/storage_pools_config.go
+++ b/lxd/storage_pools_config.go
@@ -140,12 +140,6 @@ func storagePoolFillDefault(name string, driver string, config map[string]string
config["size"] = strconv.FormatUint(uint64(size), 10)
}
- if driver == "zfs" {
- if val, ok := config["zfs.pool_name"]; !ok || val == "" {
- config["zfs.pool_name"] = name
- }
- }
-
if driver == "lvm" {
if config["lvm.thinpool_name"] == "" {
config["lvm.thinpool_name"] = "LXDThinpool"
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 951381e..5df5c72 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -89,46 +89,16 @@ func (s *storageZfs) StoragePoolInit(config map[string]interface{}) (storage, er
}
// Detect whether we have been given a zfs dataset as source.
- vdev := s.pool.Config["source"]
- if vdev != "" {
- if !filepath.IsAbs(vdev) {
- s.log.Debug(fmt.Sprintf("Treating the source \"%s\" of this storage pool as a ZFS dataset.", vdev))
- s.dataset = vdev
- }
+ if s.pool.Config["zfs.pool_name"] != "" {
+ s.dataset = s.pool.Config["zfs.pool_name"]
}
return s, nil
}
func (s *storageZfs) StoragePoolCheck() error {
- poolName := s.getOnDiskPoolName()
- err := s.zfsPoolCheck(poolName)
- if err != nil {
- source := s.pool.Config["source"]
- if filepath.IsAbs(source) && shared.PathExists(source) {
- _ = loadModule("zfs")
-
- output, err := exec.Command("zpool", "import", source, poolName).CombinedOutput()
- if err != nil {
- return fmt.Errorf("Unable to import the ZFS pool: %s", output)
- }
- } else {
- return err
- }
- }
-
- output, err := exec.Command("zfs", "get", "mountpoint", "-H", "-o", "source", poolName).CombinedOutput()
- if err != nil {
- return fmt.Errorf("Unable to query ZFS mountpoint")
- }
-
- if strings.TrimSpace(string(output)) != "local" {
- err = shared.RunCommand("zfs", "set", "mountpoint=none", poolName)
- if err != nil {
- return err
- }
- }
-
+ // Make noop for now until we figure out something useful to do for all
+ // supported use cases.
return nil
}
@@ -593,7 +563,8 @@ func (s *storageZfs) ContainerDelete(container container) error {
if err != nil {
return err
}
- origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.pool.Name))
+ poolName := s.getOnDiskPoolName()
+ origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", poolName))
err = s.zfsPoolVolumeDestroy(fs)
if err != nil {
@@ -1341,7 +1312,13 @@ func (s *storageZfs) zfsPoolCreate() error {
vdev = filepath.Join(shared.VarPath("disks"), s.pool.Name)
}
+ zpoolName := s.pool.Name
if !filepath.IsAbs(vdev) {
+ s.dataset = vdev
+ if s.pool.Config["zfs.pool_name"] != "" {
+ return fmt.Errorf("Invalid request.")
+ }
+ s.pool.Config["zfs.pool_name"] = vdev
err := s.zfsPoolCheck(vdev)
// This is a pre-existing dataset.
if err == nil {
@@ -1368,6 +1345,10 @@ func (s *storageZfs) zfsPoolCreate() error {
}
}
} else {
+ if s.pool.Config["zfs.pool_name"] != "" {
+ zpoolName = s.pool.Config["zfs.pool_name"]
+ s.dataset = zpoolName
+ }
if !shared.IsBlockdevPath(vdev) {
vdev = vdev + ".img"
s.pool.Config["source"] = vdev
@@ -1399,12 +1380,8 @@ func (s *storageZfs) zfsPoolCreate() error {
// UUID in a multi-device pool for all devices.). The
// safest way is to just store the name of the zfs pool
// we create.
- s.pool.Config["source"] = s.pool.Name
- }
-
- zpoolName := s.pool.Config["zfs.pool_name"]
- if zpoolName == "" {
- zpoolName = s.pool.Name
+ s.pool.Config["source"] = zpoolName
+ s.dataset = zpoolName
}
output, err := exec.Command(
@@ -1614,7 +1591,8 @@ func (s *storageZfs) zfsPoolVolumeCleanup(path string) error {
if err != nil {
return err
}
- origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.pool.Name))
+ poolName := s.getOnDiskPoolName()
+ origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", poolName))
err = s.zfsPoolVolumeDestroy(path)
if err != nil {
@@ -1868,7 +1846,8 @@ func (s *storageZfs) zfsPoolListSubvolumes(path string) ([]string, error) {
continue
}
- children = append(children, strings.TrimPrefix(entry, fmt.Sprintf("%s/", s.pool.Name)))
+ poolName := s.getOnDiskPoolName()
+ children = append(children, strings.TrimPrefix(entry, fmt.Sprintf("%s/", poolName)))
}
return children, nil
From d155fd96782289c2a57b054b385c52d0c1f108da Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 22 Feb 2017 03:50:04 +0100
Subject: [PATCH 5/6] btrfs: set loop file if "source" is empty
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/storage_btrfs.go | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 8ba340a..f4d18a3 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -100,7 +100,8 @@ func (s *storageBtrfs) StoragePoolCheck() error {
func (s *storageBtrfs) StoragePoolCreate() error {
source := s.pool.Config["source"]
if source == "" {
- return fmt.Errorf("No \"source\" property found for the storage pool.")
+ source = filepath.Join(shared.VarPath("disks"), s.pool.Name)
+ s.pool.Config["source"] = source
}
if !filepath.IsAbs(source) {
From 7fd12c614995f93de918ec617f1f180ca229f725 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 22 Feb 2017 04:02:25 +0100
Subject: [PATCH 6/6] storage: ensure keys are set correctly
In case the source is not a loop image file, then for zfs and lvm, the keys
- zfs.pool_name
- lvm.vg_name
should be identical to the source.
Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
lxd/patches.go | 42 ++++++++++++++++++++++++++++++++++++++++--
1 file changed, 40 insertions(+), 2 deletions(-)
diff --git a/lxd/patches.go b/lxd/patches.go
index 3db2f36..70a63f3 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"os/exec"
+ "path/filepath"
"strconv"
"strings"
"syscall"
@@ -37,7 +38,7 @@ var patches = []patch{
{name: "storage_api", run: patchStorageApi},
{name: "storage_api_v1", run: patchStorageApiV1},
{name: "storage_api_dir_cleanup", run: patchStorageApiDirCleanup},
- {name: "storage_api_lvm_keys", run: patchStorageApiLvmKeys},
+ {name: "storage_api_zfs_lvm_keys", run: patchStorageApiZfsLvmKeys},
}
type patch struct {
@@ -1403,7 +1404,7 @@ func patchStorageApiDirCleanup(name string, d *Daemon) error {
return nil
}
-func patchStorageApiLvmKeys(name string, d *Daemon) error {
+func patchStorageApiZfsLvmKeys(name string, d *Daemon) error {
_, err := dbExec(d.db, "UPDATE storage_pools_config SET key='lvm.thinpool_name' WHERE key='volume.lvm.thinpool_name';")
if err != nil {
return err
@@ -1414,5 +1415,42 @@ func patchStorageApiLvmKeys(name string, d *Daemon) error {
return err
}
+ pools, err := dbStoragePools(d.db)
+ if err != nil && err == NoSuchObjectError {
+ // No pool was configured in the previous update. So we're on a
+ // pristine LXD instance.
+ return nil
+ } else if err != nil {
+ // Database is screwed.
+ shared.LogErrorf("Failed to query database: %s", err)
+ return err
+ }
+
+ for _, poolName := range pools {
+ _, pool, err := dbStoragePoolGet(d.db, poolName)
+ if err != nil {
+ return err
+ }
+
+ if pool.Driver != "zfs" && pool.Driver != "lvm" {
+ continue
+ }
+
+ if !filepath.IsAbs(pool.Config["source"]) {
+ // Ensure that the source and the zfs.pool_name or
+ // lvm.vg_name are lined up. After creation of the pool
+ // they should never differ.
+ if pool.Driver == "zfs" {
+ pool.Config["zfs.pool_name"] = pool.Config["source"]
+ } else if pool.Driver == "lvm" {
+ pool.Config["lvm.vg_name"] = pool.Config["source"]
+ }
+ err := dbStoragePoolUpdate(d.db, poolName, pool.Config)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
return nil
}
More information about the lxc-devel
mailing list