[lxc-devel] [lxd/master] Storage: LVM VM support

tomponline on Github lxc-bot at linuxcontainers.org
Mon Jan 13 17:41:20 UTC 2020


A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 315 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20200113/b3fdc639/attachment-0001.bin>
-------------- next part --------------
From 37b4524a89cc5659b4c9f8dc9b2ef0aa77a9d6cf Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 8 Jan 2020 15:09:21 +0000
Subject: [PATCH 01/18] lxd/storage/utils: Removes default volume size from
 VolumeFillDefault

The new storage pkg handles reading this from either the expanded instance root disk or the pool's default volume config, using backendLXD.instanceRootVolumeConfig() and volume.ExpandedConfig()

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/utils.go | 17 -----------------
 1 file changed, 17 deletions(-)

diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index a2f2969542..ed367af146 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -457,23 +457,6 @@ func VolumeFillDefault(name string, config map[string]string, parentPool *api.St
 			// Unchangeable volume property: Set unconditionally.
 			config["block.mount_options"] = "discard"
 		}
-
-		// Does the pool request a default size for new storage volumes?
-		if config["size"] == "0" || config["size"] == "" {
-			config["size"] = parentPool.Config["volume.size"]
-		}
-		// Does the user explicitly request a default size for new
-		// storage volumes?
-		if config["size"] == "0" || config["size"] == "" {
-			config["size"] = "10GB"
-		}
-	} else if parentPool.Driver != "dir" {
-		if config["size"] != "" {
-			_, err := units.ParseByteSizeString(config["size"])
-			if err != nil {
-				return err
-			}
-		}
 	}
 
 	return nil

From 6be5db987172f24df17e5f07face2408aaac7279 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 9 Jan 2020 18:58:42 +0000
Subject: [PATCH 02/18] test/suites/storage: Updates LVM quota tests to take
 into account new SI units conversion

Before this, SI units specified by the user were incorrectly being supplied to LVM as binary units.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 test/suites/storage.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/suites/storage.sh b/test/suites/storage.sh
index ab5a4f28fb..587be00eac 100644
--- a/test/suites/storage.sh
+++ b/test/suites/storage.sh
@@ -773,7 +773,8 @@ test_storage() {
     rootMinKB1="17000"
     rootMaxKB1="20000"
 
-    QUOTA2="21MB"
+    # Increase quota enough to require a new 4MB LVM extent.
+    QUOTA2="25MB"
     rootMinKB2="21000"
     rootMaxKB2="23000"
   fi

From 9d438b5b9be9b42a5727f3a44b59ee6bbc1d29a2 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 10 Jan 2020 13:56:53 +0000
Subject: [PATCH 03/18] test/suites/backup: Fixes issue with import testing
 with LVM

BTRFS test removes the snapshot directory, but LVM test does not.

New storage driver uses directory presence as indication not to delete volume.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 test/suites/backup.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/suites/backup.sh b/test/suites/backup.sh
index 3ddac39ea3..ca10f82d8e 100644
--- a/test/suites/backup.sh
+++ b/test/suites/backup.sh
@@ -137,7 +137,7 @@ test_container_import() {
         ;;
       lvm)
         lvremove -f "lxdtest-$(basename "${LXD_DIR}")/containers_ctImport-snap0"
-        rm -f "${LXD_DIR}/snapshots/ctImport"
+        rm -rf "${LXD_DIR}/storage-pools/lxdtest-$(basename "${LXD_DIR}")/containers-snapshots/ctImport/snap0"
         ;;
       zfs)
         zfs destroy "lxdtest-$(basename "${LXD_DIR}")/containers/ctImport at snapshot-snap0"

From 37b8885be12148aae54c771f4fc18facb60789fe Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Tue, 7 Jan 2020 09:28:54 +0000
Subject: [PATCH 04/18] lxd/storage/drivers/load: Enables LVM driver

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/load.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/lxd/storage/drivers/load.go b/lxd/storage/drivers/load.go
index dcb8086659..eeca94a1be 100644
--- a/lxd/storage/drivers/load.go
+++ b/lxd/storage/drivers/load.go
@@ -6,9 +6,10 @@ import (
 )
 
 var drivers = map[string]func() driver{
-	"dir":    func() driver { return &dir{} },
-	"cephfs": func() driver { return &cephfs{} },
 	"btrfs":  func() driver { return &btrfs{} },
+	"cephfs": func() driver { return &cephfs{} },
+	"dir":    func() driver { return &dir{} },
+	"lvm":    func() driver { return &lvm{} },
 	"zfs":    func() driver { return &zfs{} },
 }
 

From e772d69920ffffb1fff5236381acb2ba36ae05f2 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Tue, 17 Dec 2019 11:22:34 +0000
Subject: [PATCH 05/18] lxd/storage/drivers/lvm: LVM driver implementation

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/drivers_lvm.go         | 561 ++++++++++++++++
 lxd/storage/drivers/drivers_lvm_utils.go   | 693 +++++++++++++++++++
 lxd/storage/drivers/drivers_lvm_volumes.go | 732 +++++++++++++++++++++
 3 files changed, 1986 insertions(+)
 create mode 100644 lxd/storage/drivers/drivers_lvm.go
 create mode 100644 lxd/storage/drivers/drivers_lvm_utils.go
 create mode 100644 lxd/storage/drivers/drivers_lvm_volumes.go

diff --git a/lxd/storage/drivers/drivers_lvm.go b/lxd/storage/drivers/drivers_lvm.go
new file mode 100644
index 0000000000..d9abc978c1
--- /dev/null
+++ b/lxd/storage/drivers/drivers_lvm.go
@@ -0,0 +1,561 @@
+package drivers
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+
+	"github.com/lxc/lxd/lxd/operations"
+	"github.com/lxc/lxd/lxd/revert"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+	log "github.com/lxc/lxd/shared/log15"
+	"github.com/lxc/lxd/shared/units"
+)
+
+const lvmVgPoolMarker = "lxd_pool" // Indicator tag used to mark volume groups as in use by LXD.
+
+var lvmLoaded bool
+var lvmVersion string
+
+type lvm struct {
+	common
+}
+
+func (d *lvm) load() error {
+	if lvmLoaded {
+		return nil
+	}
+
+	// Validate the required binaries.
+	for _, tool := range []string{"lvm"} {
+		_, err := exec.LookPath(tool)
+		if err != nil {
+			return fmt.Errorf("Required tool '%s' is missing", tool)
+		}
+	}
+
+	// Detect and record the version.
+	if lvmVersion == "" {
+		output, err := shared.RunCommand("lvm", "version")
+		if err != nil {
+			return fmt.Errorf("Error getting LVM version: %v", err)
+		}
+
+		lines := strings.Split(output, "\n")
+		for idx, line := range lines {
+			fields := strings.SplitAfterN(line, ":", 2)
+			if len(fields) < 2 {
+				continue
+			}
+
+			if !strings.Contains(line, "version:") {
+				continue
+			}
+
+			if idx > 0 {
+				lvmVersion += " / "
+			}
+
+			lvmVersion += strings.TrimSpace(fields[1])
+		}
+	}
+
+	lvmLoaded = true
+	return nil
+}
+
+// Info returns info about the driver and its environment.
+func (d *lvm) Info() Info {
+	return Info{
+		Name:                  "lvm",
+		Version:               lvmVersion,
+		OptimizedImages:       d.usesThinpool(), // Only thinpool pools support optimized images.
+		PreservesInodes:       !d.state.OS.RunningInUserNS,
+		Remote:                false,
+		VolumeTypes:           []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer},
+		BlockBacking:          true,
+		RunningQuotaResize:    false,
+		RunningSnapshotFreeze: false,
+	}
+}
+
+// Create creates the storage pool on the storage device.
+func (d *lvm) Create() error {
+	d.config["volatile.initial_source"] = d.config["source"]
+
+	defaultSource := filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", d.name))
+	var err error
+	var pvExists, vgExists bool
+	var pvName string
+	var vgTags []string
+
+	revert := revert.New()
+	defer revert.Fail()
+
+	if d.config["source"] == "" || d.config["source"] == defaultSource {
+		// We are using an LXD internal loopback file.
+		d.config["source"] = defaultSource
+		if d.config["lvm.vg_name"] == "" {
+			d.config["lvm.vg_name"] = d.name
+		}
+
+		size, err := units.ParseByteSizeString(d.config["size"])
+		if err != nil {
+			return err
+		}
+
+		if shared.PathExists(d.config["source"]) {
+			return fmt.Errorf("Source file location already exists")
+		}
+
+		err = createSparseFile(d.config["source"], size)
+		if err != nil {
+			return fmt.Errorf("Failed to create sparse file %q: %s", d.config["source"], err)
+		}
+
+		revert.Add(func() { os.Remove(d.config["source"]) })
+
+		// Open the loop file.
+		loopFile, err := d.openLoopFile(d.config["source"])
+		if err != nil {
+			return err
+		}
+		defer loopFile.Close()
+
+		// Check if the physical volume already exists.
+		pvName = loopFile.Name()
+		pvExists, err = d.pysicalVolumeExists(pvName)
+		if err != nil {
+			return err
+		}
+
+		// Check if the volume group already exists.
+		vgExists, vgTags, err = d.volumeGroupExists(d.config["lvm.vg_name"])
+		if err != nil {
+			return err
+		}
+	} else if filepath.IsAbs(d.config["source"]) {
+		// We are using an existing physical device.
+		srcPath := shared.HostPath(d.config["source"])
+
+		// Size is ignored as the physical device is a fixed size.
+		d.config["size"] = ""
+
+		if d.config["lvm.vg_name"] == "" {
+			d.config["lvm.vg_name"] = d.name
+		}
+		d.config["source"] = d.config["lvm.vg_name"]
+
+		if !shared.IsBlockdevPath(srcPath) {
+			return fmt.Errorf("Custom loop file locations are not supported")
+		}
+
+		// Check if the volume group already exists.
+		vgExists, vgTags, err = d.volumeGroupExists(d.config["lvm.vg_name"])
+		if err != nil {
+			return err
+		}
+
+		if vgExists {
+			return fmt.Errorf("Volume group already exists, cannot use new physical device at %s", srcPath)
+		}
+
+		// Check if the physical volume already exists.
+		pvName = srcPath
+		pvExists, err = d.pysicalVolumeExists(pvName)
+		if err != nil {
+			return err
+		}
+	} else if d.config["source"] != "" {
+		// We are using an existing volume group, so physical must exist already.
+		pvExists = true
+
+		// Size is ignored as the existing device is a fixed size.
+		d.config["size"] = ""
+
+		if d.config["lvm.vg_name"] != "" && d.config["lvm.vg_name"] != d.config["source"] {
+			return fmt.Errorf("Invalid combination of \"source\" and \"lvm.vg_name\" property")
+		}
+
+		d.config["lvm.vg_name"] = d.config["source"]
+
+		// Check the volume group already exists.
+		vgExists, vgTags, err = d.volumeGroupExists(d.config["lvm.vg_name"])
+		if err != nil {
+			return err
+		}
+
+		if !vgExists {
+			return fmt.Errorf("The requested volume group \"%s\" does not exist", d.config["lvm.vg_name"])
+		}
+	} else {
+		return fmt.Errorf("Invalid \"source\" property")
+	}
+
+	// This is an internal error condition which should never be hit.
+	if d.config["lvm.vg_name"] == "" {
+		return fmt.Errorf("No name for volume group detected")
+	}
+
+	// Used to track the result of checking whether the thin pool exists during the existing volume group empty
+	// checks to avoid having to do it twice.
+	thinPoolExists := false
+
+	if vgExists {
+		// Check that the volume group is empty. Otherwise we will refuse to use it.
+		// The LV count returned includes both normal volumes and thin volumes.
+		lvCount, err := d.countLogicalVolumes(d.config["lvm.vg_name"])
+		if err != nil {
+			return fmt.Errorf("Failed to determine whether the volume group \"%s\" is empty: %v", d.config["lvm.vg_name"], err)
+		}
+
+		empty := false
+		if lvCount > 0 {
+			if d.usesThinpool() {
+				// Always check if the thin pool exists as we may need to create it later.
+				thinPoolExists, err = d.thinpoolExists(d.config["lvm.vg_name"], d.thinpoolName())
+				if err != nil {
+					return fmt.Errorf("Failed to determine whether thinpool \"%s\" exists in volume group \"%s\": %s", d.config["lvm.vg_name"], d.thinpoolName(), err)
+				}
+
+				// If the single volume is the storage pool's thin pool LV then we still consider
+				// this an empty volume group.
+				if thinPoolExists && lvCount == 1 {
+					empty = true
+				}
+			}
+		} else {
+			empty = true
+		}
+
+		if !empty {
+			return fmt.Errorf("Volume group \"%s\" is not empty", d.config["lvm.vg_name"])
+		}
+
+		// Check the tags on the volume group to check it is not already being used by LXD.
+		if shared.StringInSlice(lvmVgPoolMarker, vgTags) {
+			return fmt.Errorf("Volume group \"%s\" is already used by LXD", d.config["lvm.vg_name"])
+		}
+	} else {
+		// Create physical volume if doesn't exist.
+		if !pvExists {
+			// This is an internal error condition which should never be hit.
+			if pvName == "" {
+				return fmt.Errorf("No name for physical volume detected")
+			}
+
+			_, err := shared.TryRunCommand("pvcreate", pvName)
+			if err != nil {
+				return fmt.Errorf("Failed to create the physical volume for the lvm storage pool: %v", err)
+			}
+			revert.Add(func() { shared.TryRunCommand("pvremove", pvName) })
+		}
+
+		// Create volume group.
+		_, err := shared.TryRunCommand("vgcreate", d.config["lvm.vg_name"], pvName)
+		if err != nil {
+			return fmt.Errorf("Failed to create the volume group for the lvm storage pool: %v", err)
+		}
+		d.logger.Debug("Volume group created", log.Ctx{"pv_name": pvName, "vg_name": d.config["lvm.vg_name"]})
+		revert.Add(func() { shared.TryRunCommand("vgremove", d.config["lvm.vg_name"]) })
+	}
+
+	// Create thin pool if needed.
+	if d.usesThinpool() {
+		if !thinPoolExists {
+			err = d.createThinpool(d.Info().Version, d.config["lvm.vg_name"], d.thinpoolName())
+			if err != nil {
+				return err
+			}
+			d.logger.Debug("Thin pool created", log.Ctx{"vg_name": d.config["lvm.vg_name"], "thinpool_name": d.thinpoolName()})
+
+			revert.Add(func() {
+				d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], "", "", d.thinpoolName()))
+			})
+		}
+	}
+
+	// Mark the volume group with the lvmVgPoolMarker tag to indicate it is now in use by LXD.
+	_, err = shared.TryRunCommand("vgchange", "--addtag", lvmVgPoolMarker, d.config["lvm.vg_name"])
+	if err != nil {
+		return fmt.Errorf("Failed to add marker tag to volume group for the lvm storage pool: %v", err)
+	}
+	d.logger.Debug("LXD marker tag added to volume group", log.Ctx{"vg_name": d.config["lvm.vg_name"]})
+
+	revert.Success()
+	return nil
+}
+
+// Delete removes the storage pool from the storage device.
+func (d *lvm) Delete(op *operations.Operation) error {
+	if d.config["source"] == "" {
+		return fmt.Errorf("No \"source\" property found for the storage pool")
+	}
+
+	var err error
+	var loopFile *os.File
+
+	// Open the loop file if needed.
+	if filepath.IsAbs(d.config["source"]) && !shared.IsBlockdevPath(d.config["source"]) {
+		loopFile, err = d.openLoopFile(d.config["source"])
+		if err != nil {
+			return err
+		}
+		defer loopFile.Close()
+	}
+
+	vgExists, vgTags, err := d.volumeGroupExists(d.config["lvm.vg_name"])
+	if err != nil {
+		return err
+	}
+
+	removeVg := false
+	if vgExists {
+		// Count normal and thin volumes.
+		lvCount, err := d.countLogicalVolumes(d.config["lvm.vg_name"])
+		if err != nil && err != errLVMNotFound {
+			return err
+		}
+
+		// Check that volume group is not in use. If it is we need to assume that other users are using
+		// the volume group, so don't remove it. This actually goes against policy since we explicitly
+		// state: our pool, and nothing but our pool, but still, let's not hurt users.
+		if err == nil {
+			if lvCount == 0 {
+				removeVg = true // Volume group is totally empty, safe to remove.
+			} else if d.usesThinpool() && lvCount > 0 {
+				// Lets see if the lv count is just our thin pool, or whether we can only remove
+				// the thin pool itself and not the volume group.
+				thinVolCount, err := d.countThinVolumes(d.config["lvm.vg_name"], d.thinpoolName())
+				if err != nil && err != errLVMNotFound {
+					return err
+				}
+
+				// Thin pool exists.
+				if err == nil {
+					// If thin pool is empty and the total VG volume count is 1 (our thin pool
+					// volume) then just remove the entire volume group.
+					if thinVolCount == 0 && lvCount == 1 {
+						removeVg = true
+					} else if thinVolCount == 0 && lvCount > 1 {
+						// Otherwise, if the thin pool is empty but the volume group has
+						// other volumes, then just remove the thin pool volume.
+						err = d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], "", "", d.thinpoolName()))
+						if err != nil {
+							return fmt.Errorf("Failed to delete thin pool \"%s\" from volume group \"%s\": %v", d.thinpoolName(), d.config["lvm.vg_name"], err)
+						}
+						d.logger.Debug("Thin pool removed", log.Ctx{"vg_name": d.config["lvm.vg_name"], "thinpool_name": d.thinpoolName()})
+					}
+				}
+			}
+		}
+
+		// Remove volume group if needed.
+		if removeVg {
+			_, err := shared.TryRunCommand("vgremove", "-f", d.config["lvm.vg_name"])
+			if err != nil {
+				return fmt.Errorf("Failed to delete the volume group for the lvm storage pool: %v", err)
+			}
+			d.logger.Debug("Volume group removed", log.Ctx{"vg_name": d.config["lvm.vg_name"]})
+		} else {
+			// Otherwise just remove the lvmVgPoolMarker tag to indicate LXD no longer uses this VG.
+			if shared.StringInSlice(lvmVgPoolMarker, vgTags) {
+				_, err = shared.TryRunCommand("vgchange", "--deltag", lvmVgPoolMarker, d.config["lvm.vg_name"])
+				if err != nil {
+					return fmt.Errorf("Failed to remove marker tag on volume group for the lvm storage pool: %v", err)
+				}
+				d.logger.Debug("LXD marker tag removed from volume group", log.Ctx{"vg_name": d.config["lvm.vg_name"]})
+			}
+		}
+	}
+
+	// If we have removed the volume group and this is a loop file, lets clean up the physical volume too.
+	if removeVg && loopFile != nil {
+		_, err := shared.TryRunCommand("pvremove", "-f", loopFile.Name())
+		if err != nil {
+			d.logger.Warn("Failed to destroy the physical volume for the lvm storage pool", log.Ctx{"err": err})
+		}
+		d.logger.Debug("Physical volume removed", log.Ctx{"pv_name": loopFile.Name()})
+
+		// Set LO_FLAGS_AUTOCLEAR before removing the loop file otherwise we will get EBADF.
+		err = SetAutoclearOnLoopDev(int(loopFile.Fd()))
+		if err != nil {
+			d.logger.Warn("Failed to set LO_FLAGS_AUTOCLEAR on loop device, manual cleanup needed", log.Ctx{"err": err})
+		}
+
+		err = loopFile.Close()
+		if err != nil {
+			return err
+		}
+
+		// This is a loop file so deconfigure the associated loop device.
+		err = os.Remove(d.config["source"])
+		if err != nil {
+			return errors.Wrapf(err, "Error removing LVM pool loop file '%s'", d.config["source"])
+		}
+		d.logger.Debug("Physical loop file removed", log.Ctx{"file_name": d.config["source"]})
+	}
+
+	// Wipe everything in the storage pool directory.
+	err = wipeDirectory(GetPoolMountPath(d.name))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (d *lvm) Validate(config map[string]string) error {
+	return nil
+}
+
+// Update updates the storage pool settings.
+func (d *lvm) Update(changedConfig map[string]string) error {
+	if changedConfig["lvm.vg_name"] != "" {
+		err := d.renameVolumeGroup(d.config["lvm.vg_name"], changedConfig["lvm.vg_name"])
+		if err != nil {
+			return err
+		}
+	}
+
+	if changedConfig["lvm.thinpool_name"] != "" {
+		err := d.renameThinpool(d.config["lvm.vg_name"], d.config["lvm.thinpool_name"], changedConfig["lvm.thinpool_name"])
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Mount mounts the storage pool (this does nothing for external LVM pools, but for loopback image
+// LVM pools this creates a loop device).
+func (d *lvm) Mount() (bool, error) {
+	// Open the loop file if the LVM device doesn't exist yet and the source points to a file.
+	if !shared.IsDir(fmt.Sprintf("/dev/%s", d.config["lvm.vg_name"])) && filepath.IsAbs(d.config["source"]) && !shared.IsBlockdevPath(d.config["source"]) {
+		loopFile, err := d.openLoopFile(d.config["source"])
+		if err != nil {
+			return false, err
+		}
+		defer loopFile.Close()
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// Unmount unmounts the storage pool (this does nothing for external LVM pools, but for loopback
+// image LVM pools this closes the loop device handle if needed).
+func (d *lvm) Unmount() (bool, error) {
+	if filepath.IsAbs(d.config["source"]) && !shared.IsBlockdevPath(d.config["source"]) {
+		loopFile, err := d.openLoopFile(d.config["source"])
+		if err != nil {
+			return false, err
+		}
+
+		// Set LO_FLAGS_AUTOCLEAR before removing the loop file otherwise we will get EBADF.
+		err = SetAutoclearOnLoopDev(int(loopFile.Fd()))
+		if err != nil {
+			d.logger.Warn("Failed to set LO_FLAGS_AUTOCLEAR on loop device, manual cleanup needed", log.Ctx{"err": err})
+		}
+
+		err = loopFile.Close()
+		if err != nil {
+			return false, err
+		}
+
+		return true, nil // We closed the file.
+	}
+
+	// No loop device was opened, so nothing to close.
+	return false, nil
+}
+
+// GetResources returns utilisation and space info about the pool.
+func (d *lvm) GetResources() (*api.ResourcesStoragePool, error) {
+	res := api.ResourcesStoragePool{}
+
+	// Thinpools will always report zero free space on the volume group, so calculate approx
+	// used space using the thinpool logical volume allocated (data and meta) percentages.
+	if d.usesThinpool() {
+		args := []string{
+			fmt.Sprintf("%s/%s", d.config["lvm.vg_name"], d.thinpoolName()),
+			"--noheadings",
+			"--units", "b",
+			"--nosuffix",
+			"--separator", ",",
+			"-o", "lv_size,data_percent,metadata_percent",
+		}
+
+		out, err := shared.RunCommand("lvs", args...)
+		if err != nil {
+			return nil, err
+		}
+
+		parts := strings.Split(strings.TrimSpace(out), ",")
+		if len(parts) < 3 {
+			return nil, fmt.Errorf("Unexpected output from lvs command")
+		}
+
+		total, err := strconv.ParseUint(parts[0], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		res.Space.Total = total
+
+		dataPerc, err := strconv.ParseFloat(parts[1], 64)
+		if err != nil {
+			return nil, err
+		}
+
+		metaPerc, err := strconv.ParseFloat(parts[2], 64)
+		if err != nil {
+			return nil, err
+		}
+
+		res.Space.Used = uint64(float64(total) * ((dataPerc + metaPerc) / 100))
+	} else {
+		// If thinpools are not in use, calculate used space in volume group.
+		args := []string{
+			d.config["lvm.vg_name"],
+			"--noheadings",
+			"--units", "b",
+			"--nosuffix",
+			"--separator", ",",
+			"-o", "vg_size,vg_free",
+		}
+
+		out, err := shared.RunCommand("vgs", args...)
+		if err != nil {
+			return nil, err
+		}
+
+		parts := strings.Split(strings.TrimSpace(out), ",")
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("Unexpected output from vgs command")
+		}
+
+		total, err := strconv.ParseUint(parts[0], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		res.Space.Total = total
+
+		free, err := strconv.ParseUint(parts[1], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		res.Space.Used = total - free
+	}
+
+	return &res, nil
+}
diff --git a/lxd/storage/drivers/drivers_lvm_utils.go b/lxd/storage/drivers/drivers_lvm_utils.go
new file mode 100644
index 0000000000..dc27626cda
--- /dev/null
+++ b/lxd/storage/drivers/drivers_lvm_utils.go
@@ -0,0 +1,693 @@
+package drivers
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/pkg/errors"
+
+	"github.com/lxc/lxd/lxd/revert"
+	"github.com/lxc/lxd/lxd/storage/locking"
+	"github.com/lxc/lxd/shared"
+	log "github.com/lxc/lxd/shared/log15"
+	"github.com/lxc/lxd/shared/units"
+	"github.com/lxc/lxd/shared/version"
+)
+
+// lvmBlockVolSuffix is the suffix used for block content type volumes.
+const lvmBlockVolSuffix = ".block"
+
+var errLVMNotFound = fmt.Errorf("Not found")
+
+// usesThinpool indicates whether the config specifies to use a thin pool or not.
+func (d *lvm) usesThinpool() bool {
+	// Default is to use a thinpool.
+	if d.config["lvm.use_thinpool"] == "" {
+		return true
+	}
+
+	return shared.IsTrue(d.config["lvm.use_thinpool"])
+}
+
+// thinpoolName returns the thinpool volume to use.
+func (d *lvm) thinpoolName() string {
+	if d.config["lvm.thinpool_name"] != "" {
+		return d.config["lvm.thinpool_name"]
+	}
+
+	return "LXDThinPool"
+}
+
+// volumeFilesystem returns the filesystem to use for logical volumes.
+func (d *lvm) volumeFilesystem(vol Volume) string {
+	fs := vol.ExpandedConfig("block.filesystem")
+	if fs != "" {
+		return fs
+	}
+
+	return DefaultFilesystem
+}
+
+// volumeSize returns the size to use when creating new logical volumes.
+func (d *lvm) volumeSize(vol Volume) string {
+	size := vol.ExpandedConfig("size")
+	if size == "" || size == "0" {
+		return defaultBlockSize
+	}
+
+	return size
+}
+
+// mountOptions returns the mount options for volumes.
+func (d *lvm) volumeMountOptions(vol Volume) string {
+	if d.config["block.mount_options"] != "" {
+		return d.config["block.mount_options"]
+	}
+
+	// Use some special options if the filesystem for the volume is BTRFS.
+	if d.volumeFilesystem(vol) == "btrfs" {
+		return "user_subvol_rm_allowed,discard"
+	}
+
+	return "discard"
+}
+
+// openLoopFile opens a loopback file and disables auto detach.
+func (d *lvm) openLoopFile(source string) (*os.File, error) {
+	if source == "" {
+		return nil, fmt.Errorf("No \"source\" property found for the storage pool")
+	}
+
+	if filepath.IsAbs(source) && !shared.IsBlockdevPath(source) {
+		unlock := locking.Lock(d.name, "", "")
+		defer unlock()
+
+		// Try to prepare new loop device.
+		loopF, err := PrepareLoopDev(source, 0)
+		if err != nil {
+			return nil, err
+		}
+
+		// Make sure that LO_FLAGS_AUTOCLEAR is unset, so that the loopback device will not
+		// autodestruct on last close.
+		err = UnsetAutoclearOnLoopDev(int(loopF.Fd()))
+		if err != nil {
+			return nil, err
+		}
+
+		return loopF, nil
+	}
+
+	return nil, fmt.Errorf("Source is not loop file")
+}
+
+// isLVMNotFoundExitError checks whether the supplied error is an exit error from an LVM command
+// meaning that the object was not found. Returns true if it is (exit status 5), false if not.
+func (d *lvm) isLVMNotFoundExitError(err error) bool {
+	runErr, ok := err.(shared.RunError)
+	if ok {
+		exitError, ok := runErr.Err.(*exec.ExitError)
+		if ok {
+			waitStatus := exitError.Sys().(syscall.WaitStatus)
+			if waitStatus.ExitStatus() == 5 {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// pysicalVolumeExists checks if an LVM Physical Volume exists.
+func (d *lvm) pysicalVolumeExists(pvName string) (bool, error) {
+	_, err := shared.RunCommand("pvs", "--noheadings", "-o", "pv_name", pvName)
+	if err != nil {
+		if d.isLVMNotFoundExitError(err) {
+			return false, nil
+		}
+
+		return false, fmt.Errorf("Error checking for LVM physical volume \"%s\"", pvName)
+	}
+
+	return true, nil
+}
+
+// volumeGroupExists checks if an LVM Volume Group exists and returns any tags on that volume group.
+func (d *lvm) volumeGroupExists(vgName string) (bool, []string, error) {
+	output, err := shared.RunCommand("vgs", "--noheadings", "-o", "vg_tags", vgName)
+	if err != nil {
+		if d.isLVMNotFoundExitError(err) {
+			return false, nil, nil
+		}
+
+		return false, nil, fmt.Errorf("Error checking for LVM volume group \"%s\"", vgName)
+	}
+
+	output = strings.TrimSpace(output)
+	tags := strings.SplitN(output, ",", -1)
+
+	return true, tags, nil
+}
+
+// volumeGroupExtentSize gets the volume group's physical extent size in bytes.
+func (d *lvm) volumeGroupExtentSize(vgName string) (int64, error) {
+	output, err := shared.RunCommand("vgs", "--noheadings", "--nosuffix", "--units", "b", "-o", "vg_extent_size", vgName)
+	if err != nil {
+		if d.isLVMNotFoundExitError(err) {
+			return -1, errLVMNotFound
+		}
+
+		return -1, err
+	}
+
+	output = strings.TrimSpace(output)
+	return strconv.ParseInt(output, 10, 64)
+}
+
+// renameVolumeGroup renames a volume group.
+func (d *lvm) renameVolumeGroup(vgName, newVgName string) error {
+	_, err := shared.TryRunCommand("vgrename", vgName, newVgName)
+	if err != nil {
+		return fmt.Errorf("Error renaming LVM volume group from \"%s\" to \"%s\": %v", vgName, newVgName, err)
+	}
+	d.logger.Debug("Volume group renamed", log.Ctx{"vg_name": vgName, "new_vg_name": newVgName})
+
+	return nil
+}
+
+// countLogicalVolumes gets the count of volumes (both normal and thin) in a volume group.
+func (d *lvm) countLogicalVolumes(vgName string) (int, error) {
+	output, err := shared.RunCommand("vgs", "--noheadings", "-o", "lv_count", vgName)
+	if err != nil {
+		if d.isLVMNotFoundExitError(err) {
+			return -1, errLVMNotFound
+		}
+
+		return -1, err
+	}
+
+	output = strings.TrimSpace(output)
+	return strconv.Atoi(output)
+}
+
+// countThinVolumes gets the count of thin volumes in a thin pool.
+func (d *lvm) countThinVolumes(vgName, poolName string) (int, error) {
+	output, err := shared.RunCommand("lvs", "--noheadings", "-o", "thin_count", fmt.Sprintf("%s/%s", vgName, poolName))
+	if err != nil {
+		if d.isLVMNotFoundExitError(err) {
+			return -1, errLVMNotFound
+		}
+
+		return -1, err
+	}
+
+	output = strings.TrimSpace(output)
+	return strconv.Atoi(output)
+}
+
+// thinpoolExists checks whether the specified thinpool exists in a volume group.
+func (d *lvm) thinpoolExists(vgName string, poolName string) (bool, error) {
+	output, err := shared.RunCommand("lvs", "--noheadings", "-o", "lv_attr", fmt.Sprintf("%s/%s", vgName, poolName))
+	if err != nil {
+		if d.isLVMNotFoundExitError(err) {
+			return false, nil
+		}
+
+		return false, fmt.Errorf("Error checking for LVM thin pool \"%s\"", poolName)
+	}
+
+	// Found LV named poolname, check type:
+	attrs := strings.TrimSpace(string(output[:]))
+	if strings.HasPrefix(attrs, "t") {
+		return true, nil
+	}
+
+	return false, fmt.Errorf("LVM volume named \"%s\" exists but is not a thin pool", poolName)
+}
+
+// logicalVolumeExists checks whether the specified logical volume exists.
+func (d *lvm) logicalVolumeExists(volDevPath string) (bool, error) {
+	_, err := shared.RunCommand("lvs", "--noheadings", "-o", "lv_name", volDevPath)
+	if err != nil {
+		if d.isLVMNotFoundExitError(err) {
+			return false, nil
+		}
+
+		return false, fmt.Errorf("Error checking for LVM logical volume \"%s\"", volDevPath)
+	}
+
+	return true, nil
+}
+
+// createThinpool creates a thin pool logical volume.
+func (d *lvm) createThinpool(lvmVersion string, vgName string, thinPoolName string) error {
+	exists, err := d.thinpoolExists(vgName, thinPoolName)
+	if err != nil {
+		return err
+	}
+
+	if exists {
+		return nil
+	}
+
+	err = d.createDefaultThinPool(lvmVersion, vgName, thinPoolName)
+	if err != nil {
+		return err
+	}
+
+	poolExists, err := d.thinpoolExists(vgName, thinPoolName)
+	if err != nil {
+		return fmt.Errorf("Error checking for LVM thin pool \"%s\" in \"%s\": %v", thinPoolName, vgName, err)
+	}
+
+	if !poolExists {
+		return fmt.Errorf("LVM thin pool \"'%s\" does not exist in Volume Group \"%s\"", thinPoolName, vgName)
+	}
+
+	return nil
+}
+
+// createDefaultThinPool creates the default thinpool as 100% the size of the volume group with a 1G
+// metadata volume.
+func (d *lvm) createDefaultThinPool(lvmVersion, vgName, thinPoolName string) error {
+	isRecent, err := d.lvmVersionIsAtLeast(lvmVersion, "2.02.99")
+	if err != nil {
+		return fmt.Errorf("Error checking LVM version: %s", err)
+	}
+
+	// Create the thin pool
+	lvmThinPool := fmt.Sprintf("%s/%s", vgName, thinPoolName)
+	if isRecent {
+		_, err = shared.TryRunCommand(
+			"lvcreate",
+			"-Wy", "--yes",
+			"--poolmetadatasize", "1G",
+			"-l", "100%FREE",
+			"--thinpool", lvmThinPool)
+	} else {
+		_, err = shared.TryRunCommand(
+			"lvcreate",
+			"-Wy", "--yes",
+			"--poolmetadatasize", "1G",
+			"-L", "1G",
+			"--thinpool", lvmThinPool)
+	}
+
+	if err != nil {
+		return fmt.Errorf("Error creating LVM thin pool named %s: %v", thinPoolName, err)
+	}
+
+	if !isRecent {
+		// Grow it to the maximum VG size (two step process required by old LVM)
+		_, err = shared.TryRunCommand("lvextend", "--alloc", "anywhere", "-l", "100%FREE", lvmThinPool)
+
+		if err != nil {
+			return fmt.Errorf("Error growing LVM thin pool named %s: %v", thinPoolName, err)
+		}
+	}
+
+	return nil
+}
+
+// renameThinpool renames a thinpool volume within the given volume group.
+func (d *lvm) renameThinpool(vgName, thinPoolName, newThinPoolName string) error {
+	if _, err := shared.TryRunCommand("lvrename", vgName, thinPoolName, newThinPoolName); err != nil {
+		return fmt.Errorf("Error renaming LVM thin pool from \"%s\" to \"%s\": %v", thinPoolName, newThinPoolName, err)
+	}
+
+	d.logger.Debug("Thin pool volume renamed", log.Ctx{"vg_name": vgName, "thinpool": thinPoolName, "new_thinpool": newThinPoolName})
+	return nil
+}
+
+// lvmVersionIsAtLeast checks whether the installed version of LVM is at least the specified version.
+func (d *lvm) lvmVersionIsAtLeast(sTypeVersion string, versionString string) (bool, error) {
+	// The supplied version string can contain several "/" separated parts; the LVM version is first.
+	installed, err := version.Parse(strings.Split(sTypeVersion, "/")[0])
+	if err != nil {
+		return false, err
+	}
+
+	required, err := version.Parse(versionString)
+	if err != nil {
+		return false, err
+	}
+
+	return installed.Compare(required) >= 0, nil
+}
+
+// roundedSizeBytesString parses the supplied size string and rounds it down to the nearest
+// multiple of 512 bytes (with a 512 byte minimum), as the LVM tools require sizes in
+// multiples of 512 bytes.
+func (d *lvm) roundedSizeBytesString(size string) (int64, error) {
+	sizeBytes, err := units.ParseByteSizeString(size)
+	if err != nil {
+		return 0, err
+	}
+
+	// Enforce the 512 byte minimum.
+	if sizeBytes < 512 {
+		sizeBytes = 512
+	}
+
+	// Round down to the closest multiple of 512 bytes (sizeBytes is already int64, so no
+	// conversion is needed around the integer division).
+	sizeBytes = (sizeBytes / 512) * 512
+
+	return sizeBytes, nil
+}
+
+// createLogicalVolume creates a logical volume (thin or thick depending on makeThinLv) sized
+// from the volume's configured size, then makes a filesystem on the new device.
+func (d *lvm) createLogicalVolume(vgName, thinPoolName string, vol Volume, makeThinLv bool) error {
+	var output string
+	var err error
+
+	// Round the configured size to a 512-byte multiple as required by the LVM tools.
+	lvSizeBytes, err := d.roundedSizeBytesString(d.volumeSize(vol))
+	if err != nil {
+		return err
+	}
+
+	lvFullName := d.lvmFullVolumeName(vol.volType, vol.contentType, vol.name)
+
+	if makeThinLv {
+		// Thin volumes are created inside the thin pool with a virtual size.
+		targetVg := fmt.Sprintf("%s/%s", vgName, thinPoolName)
+		_, err = shared.TryRunCommand("lvcreate", "-Wy", "--yes", "--thin", "-n", lvFullName, "--virtualsize", fmt.Sprintf("%db", lvSizeBytes), targetVg)
+	} else {
+		// Thick volumes are allocated directly from the volume group.
+		_, err = shared.TryRunCommand("lvcreate", "-Wy", "--yes", "-n", lvFullName, "--size", fmt.Sprintf("%db", lvSizeBytes), vgName)
+	}
+	if err != nil {
+		return fmt.Errorf("Error creating LVM logical volume %s: %v", lvFullName, err)
+	}
+
+	// Create the filesystem on the new logical volume device.
+	volDevPath := d.lvmDevPath(vgName, vol.volType, vol.contentType, vol.name)
+	output, err = makeFSType(volDevPath, d.volumeFilesystem(vol), nil)
+	if err != nil {
+		return fmt.Errorf("Error making filesystem on LVM logical volume: %v (%s)", err, output)
+	}
+
+	d.logger.Debug("Logical volume created", log.Ctx{"vg_name": vgName, "lv_name": lvFullName, "size": fmt.Sprintf("%db", lvSizeBytes), "fs": d.volumeFilesystem(vol)})
+	return nil
+}
+
+// createLogicalVolumeSnapshot creates a snapshot of a logical volume and returns the new
+// snapshot volume's device path.
+func (d *lvm) createLogicalVolumeSnapshot(vgName string, srcVol, snapVol Volume, readonly bool, makeThinLv bool) (string, error) {
+	srcVolDevPath := d.lvmDevPath(vgName, srcVol.volType, srcVol.contentType, srcVol.name)
+	// NOTE(review): "lvmVersion" is neither a parameter nor a local of this function — confirm
+	// it is in scope (package level or driver field), otherwise this will not compile.
+	isRecent, err := d.lvmVersionIsAtLeast(lvmVersion, "2.02.99")
+	if err != nil {
+		return "", fmt.Errorf("Error checking LVM version: %v", err)
+	}
+
+	snapLvName := d.lvmFullVolumeName(snapVol.volType, snapVol.contentType, snapVol.name)
+	logCtx := log.Ctx{"vg_name": vgName, "lv_name": snapLvName, "src_dev": srcVolDevPath, "thin": makeThinLv}
+	args := []string{"-n", snapLvName, "-s", srcVolDevPath}
+
+	// The "-kn" flag is only passed on recent LVM versions.
+	if isRecent {
+		args = append(args, "-kn")
+	}
+
+	// If the source is not a thin volume the size needs to be specified.
+	// According to LVM tools 15-20% of the original volume should be sufficient.
+	// However, let's not be stingy at first otherwise we might force users to fiddle around with lvextend.
+	if !makeThinLv {
+		lvSizeBytes, err := d.roundedSizeBytesString(d.volumeSize(snapVol))
+		if err != nil {
+			return "", err
+		}
+
+		args = append(args, "--size", fmt.Sprintf("%db", lvSizeBytes))
+		logCtx["size"] = fmt.Sprintf("%db", lvSizeBytes)
+	}
+
+	// Set the snapshot's permission flag (read-only or read-write).
+	if readonly {
+		args = append(args, "-pr")
+	} else {
+		args = append(args, "-prw")
+	}
+
+	revert := revert.New()
+	defer revert.Fail()
+
+	_, err = shared.TryRunCommand("lvcreate", args...)
+	if err != nil {
+		return "", fmt.Errorf("Error creating LVM logical volume snapshot: %s to %s: %v", srcVol.name, snapVol.name, err)
+	}
+	d.logger.Debug("Logical volume snapshot created", logCtx)
+
+	revert.Add(func() {
+		d.removeLogicalVolume(d.lvmDevPath(vgName, snapVol.volType, snapVol.contentType, snapVol.name))
+	})
+
+	targetVolDevPath := d.lvmDevPath(vgName, snapVol.volType, snapVol.contentType, snapVol.name)
+	if makeThinLv {
+		// Snapshots of thin logical volumes can be directly activated.
+		// Normal snapshots will complain about changing the origin (Which they never do.),
+		// so skip the activation since the logical volume will be automatically activated anyway.
+		err := d.activeLogicalVolume(targetVolDevPath)
+		if err != nil {
+			return "", fmt.Errorf("Error activating LVM logical volume snapshot: %s: %v", snapVol.name, err)
+		}
+	}
+
+	revert.Success()
+	return targetVolDevPath, nil
+}
+
+// activeLogicalVolume marks the logical volume as active.
+func (d *lvm) activeLogicalVolume(volDevPath string) error {
+	_, err := shared.TryRunCommand("lvchange", "-ay", volDevPath)
+	return err
+}
+
+// removeLogicalVolume removes a logical volume (forced, so mounted/active state is overridden).
+func (d *lvm) removeLogicalVolume(volDevPath string) error {
+	if _, err := shared.TryRunCommand("lvremove", "-f", volDevPath); err != nil {
+		return fmt.Errorf("Error removing LVM logical volume %s: %v", volDevPath, err)
+	}
+
+	d.logger.Debug("Logical volume removed", log.Ctx{"dev": volDevPath})
+	return nil
+}
+
+// renameLogicalVolume renames a logical volume from one device path to another.
+func (d *lvm) renameLogicalVolume(volDevPath string, newVolDevPath string) error {
+	if _, err := shared.TryRunCommand("lvrename", volDevPath, newVolDevPath); err != nil {
+		return fmt.Errorf("Error renaming LVM logical volume from \"%s\" to \"%s\": %v", volDevPath, newVolDevPath, err)
+	}
+
+	d.logger.Debug("Logical volume renamed", log.Ctx{"dev": volDevPath, "new_dev": newVolDevPath})
+	return nil
+}
+
+// volumeTypeToLVMType converts volume type to internal volume prefix.
+func (d *lvm) volumeTypeToLVMType(volType VolumeType) (string, error) {
+	// Map each supported volume type to its on-disk LV name prefix.
+	prefixes := map[VolumeType]string{
+		VolumeTypeContainer: "containers",
+		VolumeTypeVM:        "virtual-machines",
+		VolumeTypeImage:     "images",
+		VolumeTypeCustom:    "custom",
+	}
+
+	prefix, ok := prefixes[volType]
+	if !ok {
+		return "", fmt.Errorf("Invalid storage volume type")
+	}
+
+	return prefix, nil
+}
+
+// volNameToLVName escapes the volume name to a name suitable for using as a logical volume.
+// Existing "-" characters are doubled and the snapshot delimiter becomes a single "-";
+// the single-pass Replacer is equivalent to the two sequential replacements as the
+// patterns do not overlap.
+func (d *lvm) volNameToLVName(volName string) string {
+	return strings.NewReplacer("-", "--", shared.SnapshotDelimiter, "-").Replace(volName)
+}
+
+// lvmFullVolumeName returns the logical volume's full name with volume type prefix. It also converts the supplied
+// volName to a name suitable for use as a logical volume using volNameToLVName(). If an empty volType is passed
+// then just the volName is returned. If an invalid volType is passed then an empty string is returned.
+// If a content type of ContentTypeBlock is supplied then the volume name is suffixed with lvmBlockVolSuffix.
+func (d *lvm) lvmFullVolumeName(volType VolumeType, contentType ContentType, volName string) string {
+	if volType == "" {
+		return volName
+	}
+
+	prefix, err := d.volumeTypeToLVMType(volType)
+	if err != nil {
+		// Unknown volume type.
+		return ""
+	}
+
+	suffix := ""
+	if contentType == ContentTypeBlock {
+		suffix = lvmBlockVolSuffix
+	}
+
+	return fmt.Sprintf("%s_%s%s", prefix, d.volNameToLVName(volName), suffix)
+}
+
+// lvmDevPath returns the path to the LVM volume device. Empty string is returned if invalid volType supplied.
+func (d *lvm) lvmDevPath(vgName string, volType VolumeType, contentType ContentType, volName string) string {
+	fullName := d.lvmFullVolumeName(volType, contentType, volName)
+	if fullName == "" {
+		// Invalid volType supplied.
+		return ""
+	}
+
+	return fmt.Sprintf("/dev/%s/%s", vgName, fullName)
+}
+
+// resizeLogicalVolume resizes an LVM logical volume. This function does not resize any filesystem inside the LV.
+func (d *lvm) resizeLogicalVolume(lvPath string, sizeBytes int64) error {
+	sizeArg := fmt.Sprintf("%db", sizeBytes)
+	if _, err := shared.TryRunCommand("lvresize", "-L", sizeArg, "-f", lvPath); err != nil {
+		return fmt.Errorf("Error resizing LVM logical volume %s: %v", lvPath, err)
+	}
+
+	d.logger.Debug("Logical volume resized", log.Ctx{"dev": lvPath, "size": sizeArg})
+	return nil
+}
+
+// copyThinpoolVolume makes an optimised copy of a thinpool volume by using thinpool snapshots.
+// When refresh is true an existing target volume is replaced, but kept aside under a temporary
+// name until the copy has fully succeeded so it can be restored on failure.
+func (d *lvm) copyThinpoolVolume(vol, srcVol Volume, srcSnapshots []Volume, refresh bool) error {
+	revert := revert.New()
+	defer revert.Fail()
+
+	// Volumes renamed with the temporary suffix; only removed once everything else succeeded.
+	removeVols := []string{}
+
+	// If copying snapshots is indicated, check the source isn't itself a snapshot.
+	if len(srcSnapshots) > 0 && !srcVol.IsSnapshot() {
+		// Create the parent snapshot directory.
+		err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
+		if err != nil {
+			return err
+		}
+
+		for _, srcSnapshot := range srcSnapshots {
+			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapshot.name)
+			newFullSnapName := GetSnapshotVolumeName(vol.name, snapName)
+			newSnapVol := NewVolume(d, d.Name(), vol.volType, vol.contentType, newFullSnapName, vol.config, vol.poolConfig)
+
+			if d.HasVolume(newSnapVol) {
+				return fmt.Errorf("LVM snapshot volume already exists: %s", newSnapVol.name)
+			}
+
+			newSnapVolPath := newSnapVol.MountPath()
+			err := newSnapVol.EnsureMountPath()
+			if err != nil {
+				return err
+			}
+
+			revert.Add(func() { os.RemoveAll(newSnapVolPath) })
+
+			// We do not modify the original snapshot so as to avoid damaging if it is corrupted for
+			// some reason. If the filesystem needs to have a unique UUID generated in order to mount
+			// this will be done at restore time to be safe.
+			_, err = d.createLogicalVolumeSnapshot(d.config["lvm.vg_name"], srcSnapshot, newSnapVol, true, d.usesThinpool())
+			if err != nil {
+				return fmt.Errorf("Error creating LVM logical volume snapshot: %v", err)
+			}
+
+			revert.Add(func() {
+				d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], newSnapVol.volType, newSnapVol.contentType, newSnapVol.name))
+			})
+		}
+	}
+
+	// Handle copying the main volume.
+	if d.HasVolume(vol) {
+		if refresh {
+			newVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+			tmpVolName := fmt.Sprintf("%s%s", vol.name, tmpVolSuffix)
+			tmpVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, tmpVolName)
+
+			// Rename existing volume to temporary new name so we can revert if needed.
+			err := d.renameLogicalVolume(newVolDevPath, tmpVolDevPath)
+			if err != nil {
+				return fmt.Errorf("Error temporarily renaming original LVM logical volume: %v", err)
+			}
+
+			// Record this volume to be removed at the very end.
+			removeVols = append(removeVols, tmpVolName)
+
+			revert.Add(func() {
+				// Rename the original volume back to the original name.
+				d.renameLogicalVolume(tmpVolDevPath, newVolDevPath)
+			})
+		} else {
+			return fmt.Errorf("LVM volume already exists: %s", vol.name)
+		}
+	} else {
+		volPath := vol.MountPath()
+		err := vol.EnsureMountPath()
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() { os.RemoveAll(volPath) })
+	}
+
+	// Create snapshot of source volume as new volume.
+	_, err := d.createLogicalVolumeSnapshot(d.config["lvm.vg_name"], srcVol, vol, false, d.usesThinpool())
+	if err != nil {
+		return fmt.Errorf("Error creating LVM logical volume snapshot: %v", err)
+	}
+
+	volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+
+	revert.Add(func() {
+		d.removeLogicalVolume(volDevPath)
+	})
+
+	if vol.contentType == ContentTypeFS {
+		// Generate a new filesystem UUID if needed (this is required because some filesystems won't allow
+		// volumes with the same UUID to be mounted at the same time). This should be done before volume
+		// resize as some filesystems will need to mount the filesystem to resize.
+		if renegerateFilesystemUUIDNeeded(d.volumeFilesystem(vol)) {
+			d.logger.Debug("Regenerating filesystem UUID", log.Ctx{"dev": volDevPath, "fs": d.volumeFilesystem(vol)})
+			err = regenerateFilesystemUUID(d.volumeFilesystem(vol), volDevPath)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Resize the new volume and filesystem to the correct size.
+	err = d.SetVolumeQuota(vol, d.volumeSize(vol), nil)
+	if err != nil {
+		return err
+	}
+
+	// Finally clean up original volumes left that were renamed with a tmpVolSuffix suffix.
+	for _, removeVolName := range removeVols {
+		err := d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, removeVolName))
+		if err != nil {
+			return errors.Wrapf(err, "Error removing LVM volume: %s", vol.name)
+		}
+	}
+
+	revert.Success()
+	return nil
+}
+
+// logicalVolumeSize gets the size in bytes of a logical volume.
+// Returns errLVMNotFound if the volume does not exist.
+func (d *lvm) logicalVolumeSize(volDevPath string) (int64, error) {
+	output, err := shared.RunCommand("lvs", "--noheadings", "--nosuffix", "--units", "b", "-o", "lv_size", volDevPath)
+	if err != nil {
+		// Distinguish "volume not found" from other failures.
+		if d.isLVMNotFoundExitError(err) {
+			return -1, errLVMNotFound
+		}
+
+		return -1, err
+	}
+
+	return strconv.ParseInt(strings.TrimSpace(output), 10, 64)
+}
diff --git a/lxd/storage/drivers/drivers_lvm_volumes.go b/lxd/storage/drivers/drivers_lvm_volumes.go
new file mode 100644
index 0000000000..a70b016177
--- /dev/null
+++ b/lxd/storage/drivers/drivers_lvm_volumes.go
@@ -0,0 +1,732 @@
+package drivers
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"os"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+
+	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operations"
+	"github.com/lxc/lxd/lxd/revert"
+	"github.com/lxc/lxd/lxd/rsync"
+	"github.com/lxc/lxd/shared"
+	log "github.com/lxc/lxd/shared/log15"
+)
+
+// GetVolumeUsage returns the disk space used by the volume. This is not currently supported
+// for the LVM driver, so ErrNotSupported is always returned.
+func (d *lvm) GetVolumeUsage(vol Volume) (int64, error) {
+	return 0, ErrNotSupported
+}
+
+// ValidateVolume validates the supplied volume config using the driver's common validation,
+// with no driver-specific config rules (nil rules map).
+func (d *lvm) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
+	return d.validateVolume(vol, nil, removeUnknownKeys)
+}
+
+// HasVolume indicates whether a specific volume exists on the storage pool.
+// Lookup failures are treated as the volume not existing.
+func (d *lvm) HasVolume(vol Volume) bool {
+	volExists, err := d.logicalVolumeExists(d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name))
+	if err != nil {
+		return false
+	}
+
+	return volExists
+}
+
+// GetVolumeDiskPath returns the location of a disk volume. Not yet implemented for the LVM
+// driver; always returns ErrNotImplemented.
+func (d *lvm) GetVolumeDiskPath(vol Volume) (string, error) {
+	return "", ErrNotImplemented
+}
+
+// CreateVolume creates an empty volume and can optionally fill it by executing the supplied filler function.
+func (d *lvm) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error {
+	// Only filesystem volumes are supported at present.
+	if vol.contentType != ContentTypeFS {
+		return fmt.Errorf("Content type not supported")
+	}
+
+	revert := revert.New()
+	defer revert.Fail()
+
+	volPath := vol.MountPath()
+	err := vol.EnsureMountPath()
+	if err != nil {
+		return err
+	}
+	revert.Add(func() { os.RemoveAll(volPath) })
+
+	// Create the LV (thin or thick depending on pool config) and its filesystem.
+	err = d.createLogicalVolume(d.config["lvm.vg_name"], d.thinpoolName(), vol, d.usesThinpool())
+	if err != nil {
+		return fmt.Errorf("Error creating LVM logical volume: %v", err)
+	}
+	revert.Add(func() { d.DeleteVolume(vol, op) })
+
+	if filler != nil && filler.Fill != nil {
+		err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
+			// Run the volume filler function if supplied.
+			d.logger.Debug("Running filler function")
+			err = filler.Fill(mountPath, "")
+			if err != nil {
+				return err
+			}
+
+			// Run EnsureMountPath again after mounting to ensure the mount directory
+			// has the correct permissions set.
+			err := vol.EnsureMountPath()
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}, op)
+		if err != nil {
+			return err
+		}
+	}
+
+	revert.Success()
+	return nil
+}
+
+// MigrateVolume sends a volume for migration. Only filesystem volumes over rsync are supported.
+func (d *lvm) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
+	switch {
+	case vol.contentType != ContentTypeFS:
+		return fmt.Errorf("Content type not supported")
+	case volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC:
+		return fmt.Errorf("Migration type not supported")
+	}
+
+	return d.vfsMigrateVolume(vol, conn, volSrcArgs, op)
+}
+
+// CreateVolumeFromMigration creates a volume being sent via a migration.
+// Only filesystem volumes over rsync are supported.
+func (d *lvm) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
+	switch {
+	case vol.contentType != ContentTypeFS:
+		return fmt.Errorf("Content type not supported")
+	case volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC:
+		return fmt.Errorf("Migration type not supported")
+	}
+
+	return genericCreateVolumeFromMigration(d, nil, vol, conn, volTargetArgs, preFiller, op)
+}
+
+// CreateVolumeFromCopy provides same-pool volume copying functionality.
+func (d *lvm) CreateVolumeFromCopy(vol, srcVol Volume, copySnapshots bool, op *operations.Operation) error {
+	if vol.contentType != ContentTypeFS || srcVol.contentType != ContentTypeFS {
+		return fmt.Errorf("Content type not supported")
+	}
+
+	// Gather the source's snapshots if requested (and the source isn't itself a snapshot).
+	var srcSnapshots []Volume
+	if copySnapshots && !srcVol.IsSnapshot() {
+		var err error
+		srcSnapshots, err = srcVol.Snapshots(op)
+		if err != nil {
+			return err
+		}
+	}
+
+	// We can use optimised copying when the pool is backed by an LVM thinpool,
+	// otherwise fall back to the generic copy mechanism.
+	if !d.usesThinpool() {
+		return genericCopyVolume(d, nil, vol, srcVol, srcSnapshots, false, op)
+	}
+
+	return d.copyThinpoolVolume(vol, srcVol, srcSnapshots, false)
+}
+
+// RefreshVolume provides same-pool volume and specific snapshots syncing functionality.
+func (d *lvm) RefreshVolume(vol, srcVol Volume, srcSnapshots []Volume, op *operations.Operation) error {
+	if vol.contentType != ContentTypeFS || srcVol.contentType != ContentTypeFS {
+		return fmt.Errorf("Content type not supported")
+	}
+
+	// Optimised refresh is possible when the pool is backed by an LVM thinpool,
+	// otherwise fall back to the generic copy mechanism.
+	if !d.usesThinpool() {
+		return genericCopyVolume(d, nil, vol, srcVol, srcSnapshots, true, op)
+	}
+
+	return d.copyThinpoolVolume(vol, srcVol, srcSnapshots, true)
+}
+
+// VolumeSnapshots returns a list of snapshots for the volume.
+func (d *lvm) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) {
+	// We use the vfsVolumeSnapshots rather than inspecting the logical volumes themselves because the origin
+	// property of an LVM snapshot can be removed/changed when restoring snapshots, such that they are no
+	// longer marked as the origin of the parent volume.
+	return d.vfsVolumeSnapshots(vol, op)
+}
+
+// UpdateVolume applies config changes to the volume.
+// Only filesystem volumes are supported, and only the "size" key triggers any action.
+func (d *lvm) UpdateVolume(vol Volume, changedConfig map[string]string) error {
+	if vol.contentType != ContentTypeFS {
+		return fmt.Errorf("Content type not supported")
+	}
+
+	// A size change is applied by resizing via the quota mechanism.
+	if newSize, changed := changedConfig["size"]; changed {
+		return d.SetVolumeQuota(vol, newSize, nil)
+	}
+
+	return nil
+}
+
+// RenameVolume renames a volume and its snapshots. The volume is renamed while unmounted, and
+// every step registers a revert action so a failure part-way through restores the original
+// logical volume names and directory paths.
+func (d *lvm) RenameVolume(vol Volume, newVolName string, op *operations.Operation) error {
+	volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+	lvExists, err := d.logicalVolumeExists(volDevPath)
+	if err != nil {
+		return err
+	}
+
+	if !lvExists {
+		return fmt.Errorf("LVM logical volume doesn't exist")
+	}
+
+	return vol.UnmountTask(func(op *operations.Operation) error {
+		snapNames, err := d.VolumeSnapshots(vol, op)
+		if err != nil {
+			return err
+		}
+
+		revert := revert.New()
+		defer revert.Fail()
+
+		// Rename snapshots (change volume prefix to use new parent volume name).
+		for _, snapName := range snapNames {
+			snapVolName := GetSnapshotVolumeName(vol.name, snapName)
+			snapVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, snapVolName)
+			newSnapVolName := GetSnapshotVolumeName(newVolName, snapName)
+			newSnapVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, newSnapVolName)
+			err = d.renameLogicalVolume(snapVolDevPath, newSnapVolDevPath)
+			if err != nil {
+				return err
+			}
+			revert.Add(func() { d.renameLogicalVolume(newSnapVolDevPath, snapVolDevPath) })
+		}
+
+		// Rename snapshots dir if present.
+		srcSnapshotDir := GetVolumeSnapshotDir(d.name, vol.volType, vol.name)
+		dstSnapshotDir := GetVolumeSnapshotDir(d.name, vol.volType, newVolName)
+		if shared.PathExists(srcSnapshotDir) {
+			err = os.Rename(srcSnapshotDir, dstSnapshotDir)
+			if err != nil {
+				return errors.Wrapf(err, "Error renaming LVM logical volume snapshot directory from '%s' to '%s'", srcSnapshotDir, dstSnapshotDir)
+			}
+			revert.Add(func() { os.Rename(dstSnapshotDir, srcSnapshotDir) })
+		}
+
+		// Rename actual volume.
+		newVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, newVolName)
+		err = d.renameLogicalVolume(volDevPath, newVolDevPath)
+		if err != nil {
+			return err
+		}
+		revert.Add(func() { d.renameLogicalVolume(newVolDevPath, volDevPath) })
+
+		// Rename volume dir.
+		srcVolumePath := GetVolumeMountPath(d.name, vol.volType, vol.name)
+		dstVolumePath := GetVolumeMountPath(d.name, vol.volType, newVolName)
+		err = os.Rename(srcVolumePath, dstVolumePath)
+		if err != nil {
+			return errors.Wrapf(err, "Error renaming LVM logical volume mount path from '%s' to '%s'", srcVolumePath, dstVolumePath)
+		}
+		revert.Add(func() { os.Rename(dstVolumePath, srcVolumePath) })
+
+		revert.Success()
+		return nil
+	}, op)
+}
+
+// RestoreVolume restores a volume from a snapshot. For thinpool-backed pools the volume is
+// replaced by a writable snapshot of the snapshot; for classic pools the snapshot's contents
+// are rsynced into the volume.
+func (d *lvm) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
+	// Instantiate snapshot volume from snapshot name.
+	snapVol, err := vol.NewSnapshot(snapshotName)
+	if err != nil {
+		return err
+	}
+
+	// Check snapshot volume exists.
+	if !d.HasVolume(snapVol) {
+		return fmt.Errorf("Snapshot not found")
+	}
+
+	revert := revert.New()
+	defer revert.Fail()
+
+	// If the pool uses thinpools, then the process for restoring a snapshot is as follows:
+	// 1. Rename the original volume to a temporary name (so we can revert later if needed).
+	// 2. Create a writable snapshot with the original name from the snapshot being restored.
+	// 3. Delete the renamed original volume.
+	if d.usesThinpool() {
+		_, err = d.UnmountVolume(vol, op)
+		if err != nil {
+			return fmt.Errorf("Error unmounting LVM logical volume: %v", err)
+		}
+
+		originalVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+		tmpVolName := fmt.Sprintf("%s%s", vol.name, tmpVolSuffix)
+		tmpVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, tmpVolName)
+
+		// Rename original logical volume to temporary new name so we can revert if needed.
+		err = d.renameLogicalVolume(originalVolDevPath, tmpVolDevPath)
+		if err != nil {
+			return fmt.Errorf("Error temporarily renaming original LVM logical volume: %v", err)
+		}
+
+		revert.Add(func() {
+			// Rename the original volume back to the original name.
+			d.renameLogicalVolume(tmpVolDevPath, originalVolDevPath)
+		})
+
+		// Create writable snapshot from source snapshot named as target volume.
+		_, err = d.createLogicalVolumeSnapshot(d.config["lvm.vg_name"], snapVol, vol, false, true)
+		if err != nil {
+			return fmt.Errorf("Error restoring LVM logical volume snapshot: %v", err)
+		}
+
+		volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+
+		revert.Add(func() {
+			d.removeLogicalVolume(volDevPath)
+		})
+
+		// If the volume's filesystem needs to have its UUID regenerated to allow mount then do so now.
+		if vol.contentType == ContentTypeFS && renegerateFilesystemUUIDNeeded(d.volumeFilesystem(vol)) {
+			d.logger.Debug("Regenerating filesystem UUID", log.Ctx{"dev": volDevPath, "fs": d.volumeFilesystem(vol)})
+			err = regenerateFilesystemUUID(d.volumeFilesystem(vol), volDevPath)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Finally remove the original logical volume. Should always be the last step to allow revert.
+		err = d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, tmpVolName))
+		if err != nil {
+			return fmt.Errorf("Error removing original LVM logical volume: %v", err)
+		}
+
+		revert.Success()
+		return nil
+	}
+
+	// If the pool uses classic logical volumes, then the process for restoring a snapshot is as follows:
+	// 1. Mount source and target.
+	// 2. Rsync source to target.
+	// 3. Unmount source and target.
+	err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
+		// Copy source to destination (mounting each volume if needed).
+		err = snapVol.MountTask(func(srcMountPath string, op *operations.Operation) error {
+			bwlimit := d.config["rsync.bwlimit"]
+			_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
+			return err
+		}, op)
+		if err != nil {
+			return err
+		}
+
+		// Run EnsureMountPath after mounting and syncing to ensure the mounted directory has the
+		// correct permissions set.
+		err = vol.EnsureMountPath()
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}, op)
+	if err != nil {
+		return fmt.Errorf("Error restoring LVM logical volume snapshot: %v", err)
+	}
+
+	revert.Success()
+	return nil
+}
+
+// DeleteVolume deletes a volume of the storage device. If any snapshots of the volume remain then this function
+// will return an error.
+func (d *lvm) DeleteVolume(vol Volume, op *operations.Operation) error {
+	snapshots, err := d.VolumeSnapshots(vol, op)
+	if err != nil {
+		return err
+	}
+
+	if len(snapshots) > 0 {
+		return fmt.Errorf("Cannot remove a volume that has snapshots")
+	}
+
+	volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+	lvExists, err := d.logicalVolumeExists(volDevPath)
+	if err != nil {
+		return err
+	}
+
+	if lvExists {
+		_, err = d.UnmountVolume(vol, op)
+		if err != nil {
+			return fmt.Errorf("Error unmounting LVM logical volume: %v", err)
+		}
+
+		// Reuse the device path computed above rather than re-deriving it.
+		err = d.removeLogicalVolume(volDevPath)
+		if err != nil {
+			return fmt.Errorf("Error removing LVM logical volume: %v", err)
+		}
+	}
+
+	// Remove the volume's mount path from the storage device.
+	mountPath := vol.MountPath()
+	err = os.RemoveAll(mountPath)
+	if err != nil {
+		return errors.Wrapf(err, "Error removing LVM logical volume mount path '%s'", mountPath)
+	}
+
+	// Although the volume snapshot directory should already be removed, remove it here just in
+	// case the top-level directory is left behind.
+	err = deleteParentSnapshotDirIfEmpty(d.name, vol.volType, vol.name)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MountVolume mounts the volume's logical volume device onto the volume mount path if it is a
+// filesystem volume and not already mounted. Returns true if this call performed the mount
+// (so a matching unmount is needed later), or false if no mount was needed.
+func (d *lvm) MountVolume(vol Volume, op *operations.Operation) (bool, error) {
+	mountPath := vol.MountPath()
+
+	// Check if already mounted.
+	if vol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
+		volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+		mountFlags, mountOptions := resolveMountOptions(d.volumeMountOptions(vol))
+		err := TryMount(volDevPath, mountPath, d.volumeFilesystem(vol), mountFlags, mountOptions)
+		if err != nil {
+			return false, errors.Wrapf(err, "Failed to mount LVM logical volume")
+		}
+		d.logger.Debug("Mounted logical volume", log.Ctx{"dev": volDevPath, "path": mountPath})
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// MountVolumeSnapshot sets up a read-only mount on top of the snapshot to avoid accidental modifications.
+// Returns true if this call performed the mount, or false if the snapshot was already mounted.
+func (d *lvm) MountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
+	mountPath := snapVol.MountPath()
+
+	// Check if already mounted.
+	if !shared.IsMountPoint(mountPath) {
+		revert := revert.New()
+		defer revert.Fail()
+
+		// Default to mounting the original snapshot directly. This may be changed below if a temporary
+		// snapshot needs to be taken.
+		mountVol := snapVol
+
+		// Regenerate filesystem UUID if needed. This is because some filesystems do not allow mounting
+		// multiple volumes that share the same UUID. As snapshotting a volume will copy its UUID we need
+		// to potentially regenerate the UUID of the snapshot now that we are trying to mount it.
+		// This is done at mount time rather than snapshot time for 2 reasons; firstly snapshots need to be
+		// as fast as possible, and on some filesystems regenerating the UUID is a slow process, secondly
+		// we do not want to modify a snapshot in case it is corrupted for some reason, so at mount time
+		// we take another snapshot of the snapshot, regenerate the temporary snapshot's UUID and then
+		// mount that.
+		if renegerateFilesystemUUIDNeeded(d.volumeFilesystem(snapVol)) {
+			// Instantiate a new volume to be the temporary writable snapshot.
+			tmpVolName := fmt.Sprintf("%s%s", snapVol.name, tmpVolSuffix)
+			tmpVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, tmpVolName, snapVol.config, snapVol.poolConfig)
+
+			// Create writable snapshot from source snapshot named with a tmpVolSuffix suffix.
+			_, err := d.createLogicalVolumeSnapshot(d.config["lvm.vg_name"], snapVol, tmpVol, false, d.usesThinpool())
+			if err != nil {
+				return false, fmt.Errorf("Error creating temporary LVM logical volume snapshot: %v", err)
+			}
+
+			revert.Add(func() {
+				d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], tmpVol.volType, tmpVol.contentType, tmpVol.name))
+			})
+
+			tmpVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], tmpVol.volType, tmpVol.contentType, tmpVol.name)
+
+			d.logger.Debug("Regenerating filesystem UUID", log.Ctx{"dev": tmpVolDevPath, "fs": d.volumeFilesystem(tmpVol)})
+			err = regenerateFilesystemUUID(d.volumeFilesystem(tmpVol), tmpVolDevPath)
+			if err != nil {
+				return false, err
+			}
+
+			// We are going to mount the temporary volume instead.
+			mountVol = tmpVol
+		}
+
+		// Finally attempt to mount the volume that needs mounting (always read-only).
+		// NOTE(review): mount options derive from snapVol while the filesystem derives from
+		// mountVol — confirm these always agree when a temporary snapshot is used.
+		volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], mountVol.volType, mountVol.contentType, mountVol.name)
+		mountFlags, mountOptions := resolveMountOptions(d.volumeMountOptions(snapVol))
+		err := TryMount(volDevPath, mountPath, d.volumeFilesystem(mountVol), mountFlags|unix.MS_RDONLY, mountOptions)
+		if err != nil {
+			return false, errors.Wrapf(err, "Failed to mount LVM snapshot volume")
+		}
+		d.logger.Debug("Mounted logical volume snapshot", log.Ctx{"dev": volDevPath, "path": mountPath})
+
+		revert.Success()
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// UnmountVolume unmounts an LVM logical volume from its mount path. It returns true if the
+// volume was unmounted, or false if the volume was not mounted in the first place.
+func (d *lvm) UnmountVolume(vol Volume, op *operations.Operation) (bool, error) {
+	mountPath := vol.MountPath()
+
+	// Check if already mounted.
+	if shared.IsMountPoint(mountPath) {
+		err := TryUnmount(mountPath, 0)
+		if err != nil {
+			return false, errors.Wrapf(err, "Failed to unmount LVM logical volume")
+		}
+		d.logger.Debug("Unmounted logical volume", log.Ctx{"path": mountPath})
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// UnmountVolumeSnapshot removes the read-only mount placed on top of a snapshot.
+// If a temporary snapshot volume exists then it will attempt to remove it.
+func (d *lvm) UnmountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
+	mountPath := snapVol.MountPath()
+
+	// Check if already mounted.
+	if shared.IsMountPoint(mountPath) {
+		err := TryUnmount(mountPath, 0)
+		if err != nil {
+			return false, errors.Wrapf(err, "Failed to unmount LVM snapshot volume")
+		}
+		d.logger.Debug("Unmounted logical volume snapshot", log.Ctx{"path": mountPath})
+
+		// Check if a temporary snapshot exists, and if so remove it.
+		tmpVolName := fmt.Sprintf("%s%s", snapVol.name, tmpVolSuffix)
+		tmpVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], snapVol.volType, snapVol.contentType, tmpVolName)
+		exists, err := d.logicalVolumeExists(tmpVolDevPath)
+		if err != nil {
+			return true, errors.Wrapf(err, "Failed to check existence of temporary LVM snapshot volume '%s'", tmpVolDevPath)
+		}
+
+		if exists {
+			err = d.removeLogicalVolume(tmpVolDevPath)
+			if err != nil {
+				return true, errors.Wrapf(err, "Failed to remove temporary LVM snapshot volume '%s'", tmpVolDevPath)
+			}
+		}
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// SetVolumeQuota sets the quota on the volume.
+func (d *lvm) SetVolumeQuota(vol Volume, size string, op *operations.Operation) error {
+	// Can't do anything if the size property has been removed from volume config.
+	if size == "" || size == "0" {
+		return nil
+	}
+
+	newSizeBytes, err := d.roundedSizeBytesString(size)
+	if err != nil {
+		return err
+	}
+
+	// Read actual size of current volume.
+	volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+	oldSizeBytes, err := d.logicalVolumeSize(volDevPath)
+	if err != nil {
+		return err
+	}
+
+	// Get the volume group's physical extent size, as we use this to figure out if the new and old sizes
+	// differ by at least one extent, otherwise there is no point in trying to resize as LVM won't do it.
+	vgExtentSize, err := d.volumeGroupExtentSize(d.config["lvm.vg_name"])
+	if err != nil {
+		return err
+	}
+
+	// Round up the number of extents required for new quota size, as this is what the lvresize tool will do.
+	newNumExtents := math.Ceil(float64(newSizeBytes) / float64(vgExtentSize))
+	oldNumExtents := math.Ceil(float64(oldSizeBytes) / float64(vgExtentSize))
+	extentDiff := int(newNumExtents - oldNumExtents)
+
+	// If old and new extents required are the same, nothing to do, as LVM won't resize them.
+	if extentDiff == 0 {
+		return nil
+	}
+
+	logCtx := log.Ctx{"dev": volDevPath, "size": fmt.Sprintf("%db", newSizeBytes)}
+
+	// Resize filesystem if needed.
+	if vol.contentType == ContentTypeFS {
+		if newSizeBytes < oldSizeBytes {
+			// Shrink filesystem to new size first, then shrink logical volume.
+			err = shrinkFileSystem(d.volumeFilesystem(vol), volDevPath, vol, newSizeBytes)
+			if err != nil {
+				return err
+			}
+			d.logger.Debug("Logical volume filesystem shrunk", logCtx)
+
+			err = d.resizeLogicalVolume(volDevPath, newSizeBytes)
+			if err != nil {
+				return err
+			}
+		} else if newSizeBytes > oldSizeBytes {
+			// Grow logical volume to new size first, then grow filesystem to fill it.
+			err = d.resizeLogicalVolume(volDevPath, newSizeBytes)
+			if err != nil {
+				return err
+			}
+
+			err = growFileSystem(d.volumeFilesystem(vol), volDevPath, vol)
+			if err != nil {
+				return err
+			}
+			d.logger.Debug("Logical volume filesystem grown", logCtx)
+		}
+	} else {
+		err = d.resizeLogicalVolume(volDevPath, newSizeBytes)
+		if err != nil {
+			return err
+
+		}
+	}
+
+	return nil
+}
+
+// CreateVolumeSnapshot creates a snapshot of a volume.
+func (d *lvm) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
+	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
+	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, snapVol.config, snapVol.poolConfig)
+	snapPath := snapVol.MountPath()
+
+	// Create the parent directory.
+	err := createParentSnapshotDirIfMissing(d.name, snapVol.volType, parentName)
+	if err != nil {
+		return err
+	}
+
+	revert := revert.New()
+	defer revert.Fail()
+
+	// Create snapshot directory.
+	err = snapVol.EnsureMountPath()
+	if err != nil {
+		return err
+	}
+	revert.Add(func() { os.RemoveAll(snapPath) })
+
+	_, err = d.createLogicalVolumeSnapshot(d.config["lvm.vg_name"], parentVol, snapVol, true, d.usesThinpool())
+	if err != nil {
+		return fmt.Errorf("Error creating LVM logical volume snapshot: %v", err)
+	}
+
+	revert.Success()
+	return nil
+}
+
+// DeleteVolumeSnapshot removes a snapshot from the storage device. The snapVol's name is
+// expected to be in the combined "volume/snapshot" format, from which the parent is derived.
+func (d *lvm) DeleteVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
+	// Remove the snapshot from the storage device.
+	volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], snapVol.volType, snapVol.contentType, snapVol.name)
+	lvExists, err := d.logicalVolumeExists(volDevPath)
+	if err != nil {
+		return err
+	}
+
+	if lvExists {
+		_, err = d.UnmountVolume(snapVol, op)
+		if err != nil {
+			return fmt.Errorf("Error unmounting LVM logical volume: %v", err)
+		}
+
+		err = d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], snapVol.volType, snapVol.contentType, snapVol.name))
+		if err != nil {
+			return fmt.Errorf("Error removing LVM logical volume: %v", err)
+		}
+	}
+
+	// Remove the snapshot mount path from the storage device.
+	snapPath := snapVol.MountPath()
+	err = os.RemoveAll(snapPath)
+	if err != nil {
+		return errors.Wrapf(err, "Error removing LVM snapshot mount path '%s'", snapPath)
+	}
+
+	// Remove the parent snapshot directory if this is the last snapshot being removed.
+	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
+	err = deleteParentSnapshotDirIfEmpty(d.name, snapVol.volType, parentName)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// RenameVolumeSnapshot renames a volume snapshot.
+func (d *lvm) RenameVolumeSnapshot(snapVol Volume, newSnapshotName string, op *operations.Operation) error {
+	volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], snapVol.volType, snapVol.contentType, snapVol.name)
+	lvExists, err := d.logicalVolumeExists(volDevPath)
+	if err != nil {
+		return err
+	}
+
+	if !lvExists {
+		return fmt.Errorf("LVM logical volume doesn't exist")
+	}
+
+	_, err = d.UnmountVolume(snapVol, nil)
+	if err != nil {
+		return fmt.Errorf("Error unmounting LVM logical volume: %v", err)
+	}
+
+	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
+	newSnapVolName := GetSnapshotVolumeName(parentName, newSnapshotName)
+	newVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], snapVol.volType, snapVol.contentType, newSnapVolName)
+	err = d.renameLogicalVolume(volDevPath, newVolDevPath)
+	if err != nil {
+		return fmt.Errorf("Error renaming LVM logical volume: %v", err)
+	}
+
+	oldPath := snapVol.MountPath()
+	newPath := GetVolumeMountPath(d.name, snapVol.volType, newSnapVolName)
+	err = os.Rename(oldPath, newPath)
+	if err != nil {
+		return errors.Wrapf(err, "Error renaming snapshot mount path from '%s' to '%s'", oldPath, newPath)
+	}
+
+	return nil
+}
+
+// BackupVolume copies a volume (and optionally its snapshots) to a specified target path.
+// This driver does not support optimized backups.
+func (d *lvm) BackupVolume(vol Volume, targetPath string, _, snapshots bool, op *operations.Operation) error {
+	return d.vfsBackupVolume(vol, targetPath, snapshots, op)
+}
+
+// CreateVolumeFromBackup restores a backup tarball onto the storage device.
+func (d *lvm) CreateVolumeFromBackup(vol Volume, snapshots []string, srcData io.ReadSeeker, optimizedStorage bool, op *operations.Operation) (func(vol Volume) error, func(), error) {
+	return genericBackupUnpack(d, vol, snapshots, srcData, op)
+}

From c6609a131f2e6c55199ecfd4a504797e6915720d Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 10 Jan 2020 17:45:43 +0000
Subject: [PATCH 06/18] tests: Add lvm to list of new drivers

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 test/includes/storage.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/includes/storage.sh b/test/includes/storage.sh
index 36d05ad5fe..003f0323e2 100644
--- a/test/includes/storage.sh
+++ b/test/includes/storage.sh
@@ -130,8 +130,8 @@ umount_loops() {
 }
 
 storage_compatible() {
-    if [ "${1}" = "cephfs" ] || [ "${1}" = "dir" ] || [ "${1}" = "btrfs" ] || [ "${1}" = "zfs" ]; then
-        if [ "${2}" = "cephfs" ] || [ "${2}" = "dir" ] || [ "${2}" = "btrfs" ] || [ "${2}" = "zfs" ]; then
+    if [ "${1}" = "cephfs" ] || [ "${1}" = "dir" ] || [ "${1}" = "btrfs" ] || [ "${1}" = "lvm" ] || [ "${1}" = "zfs" ]; then
+        if [ "${2}" = "cephfs" ] || [ "${2}" = "dir" ] || [ "${2}" = "btrfs" ] || [ "${2}" = "lvm" ] || [ "${2}" = "zfs" ]; then
             true
             return
         else

From 29b8264b53e309c9ada784fb69d940557f4ccebd Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 11:46:07 +0000
Subject: [PATCH 07/18] client/lxd/instances: Sends instance type when copying
 instances

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 client/lxd_instances.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/client/lxd_instances.go b/client/lxd_instances.go
index 340a4b9bf5..b40b995863 100644
--- a/client/lxd_instances.go
+++ b/client/lxd_instances.go
@@ -352,6 +352,7 @@ func (r *ProtocolLXD) CopyInstance(source InstanceServer, instance api.Instance,
 	req := api.InstancesPost{
 		Name:        instance.Name,
 		InstancePut: instance.Writable(),
+		Type:        api.InstanceType(instance.Type),
 	}
 	req.Source.BaseImage = instance.Config["volatile.base_image"]
 

From d2d91d47afc02e3f9880886f2afd7c0b1501f91a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 14:37:51 +0000
Subject: [PATCH 08/18] lxd/storage/drivers/generic: Updates genericCopyVolume
 to be VM block aware using copyDevice

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/generic.go | 50 +++++++++++++++++++++++++++++++---
 1 file changed, 46 insertions(+), 4 deletions(-)

diff --git a/lxd/storage/drivers/generic.go b/lxd/storage/drivers/generic.go
index 56df274740..b3ac53cde6 100644
--- a/lxd/storage/drivers/generic.go
+++ b/lxd/storage/drivers/generic.go
@@ -16,8 +16,8 @@ import (
 // genericCopyVolume copies a volume and its snapshots using a non-optimized method.
 // initVolume is run against the main volume (not the snapshots) and is often used for quota initialization.
 func genericCopyVolume(d Driver, initVolume func(vol Volume) (func(), error), vol Volume, srcVol Volume, srcSnapshots []Volume, refresh bool, op *operations.Operation) error {
-	if vol.contentType != ContentTypeFS || srcVol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
+	if vol.contentType != srcVol.contentType {
+		return fmt.Errorf("Content type of source and target must be the same")
 	}
 
 	bwlimit := d.Config()["rsync.bwlimit"]
@@ -46,7 +46,28 @@ func genericCopyVolume(d Driver, initVolume func(vol Volume) (func(), error), vo
 				err := srcSnapshot.MountTask(func(srcMountPath string, op *operations.Operation) error {
 					// Copy the snapshot.
 					_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
-					return err
+					if err != nil {
+						return err
+					}
+
+					if srcSnapshot.IsVMBlock() {
+						srcDevPath, err := d.GetVolumeDiskPath(srcSnapshot)
+						if err != nil {
+							return err
+						}
+
+						targetDevPath, err := d.GetVolumeDiskPath(vol)
+						if err != nil {
+							return err
+						}
+
+						err = copyDevice(srcDevPath, targetDevPath)
+						if err != nil {
+							return err
+						}
+					}
+
+					return nil
 				}, op)
 				if err != nil {
 					return err
@@ -79,7 +100,28 @@ func genericCopyVolume(d Driver, initVolume func(vol Volume) (func(), error), vo
 		// Copy source to destination (mounting each volume if needed).
 		err := srcVol.MountTask(func(srcMountPath string, op *operations.Operation) error {
 			_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
-			return err
+			if err != nil {
+				return err
+			}
+
+			if srcVol.IsVMBlock() {
+				srcDevPath, err := d.GetVolumeDiskPath(srcVol)
+				if err != nil {
+					return err
+				}
+
+				targetDevPath, err := d.GetVolumeDiskPath(vol)
+				if err != nil {
+					return err
+				}
+
+				err = copyDevice(srcDevPath, targetDevPath)
+				if err != nil {
+					return err
+				}
+			}
+
+			return nil
 		}, op)
 		if err != nil {
 			return err

From c2bd1e81df22a6a6e3b2c77ffee76a1c7cf05d9a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 14:38:27 +0000
Subject: [PATCH 09/18] lxd/storage/drivers: Filler logging

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/driver_cephfs_volumes.go | 3 ++-
 lxd/storage/drivers/driver_dir_volumes.go    | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/lxd/storage/drivers/driver_cephfs_volumes.go b/lxd/storage/drivers/driver_cephfs_volumes.go
index 447fec82b8..c49b8ee17b 100644
--- a/lxd/storage/drivers/driver_cephfs_volumes.go
+++ b/lxd/storage/drivers/driver_cephfs_volumes.go
@@ -12,6 +12,7 @@ import (
 	"github.com/lxc/lxd/lxd/rsync"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/ioprogress"
+	log "github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/units"
 )
 
@@ -42,7 +43,7 @@ func (d *cephfs) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.O
 
 	// Fill the volume.
 	if filler != nil && filler.Fill != nil {
-		d.logger.Debug("Running filler function")
+		d.logger.Debug("Running filler function", log.Ctx{"path": volPath})
 		err = filler.Fill(volPath, "")
 		if err != nil {
 			return err
diff --git a/lxd/storage/drivers/driver_dir_volumes.go b/lxd/storage/drivers/driver_dir_volumes.go
index 6e55d1781d..30ab4892db 100644
--- a/lxd/storage/drivers/driver_dir_volumes.go
+++ b/lxd/storage/drivers/driver_dir_volumes.go
@@ -11,6 +11,7 @@ import (
 	"github.com/lxc/lxd/lxd/rsync"
 	"github.com/lxc/lxd/lxd/storage/quota"
 	"github.com/lxc/lxd/shared"
+	log "github.com/lxc/lxd/shared/log15"
 )
 
 // CreateVolume creates an empty volume and can optionally fill it by executing the supplied
@@ -49,7 +50,7 @@ func (d *dir) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Oper
 
 	// Run the volume filler function if supplied.
 	if filler != nil && filler.Fill != nil {
-		d.logger.Debug("Running filler function")
+		d.logger.Debug("Running filler function", log.Ctx{"path": volPath})
 		err = filler.Fill(volPath, rootBlockPath)
 		if err != nil {
 			return err

From 05bb427f0ad7b84790c50b5a903a2fccc9f31c21 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 14:52:35 +0000
Subject: [PATCH 10/18] lxd/storage/drivers/utils: Adds copyDevice function

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/utils.go | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/lxd/storage/drivers/utils.go b/lxd/storage/drivers/utils.go
index cf26c5aa80..781e5a6c56 100644
--- a/lxd/storage/drivers/utils.go
+++ b/lxd/storage/drivers/utils.go
@@ -2,6 +2,7 @@ package drivers
 
 import (
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -529,3 +530,25 @@ func regenerateFilesystemXFSUUID(devPath string) error {
 
 	return nil
 }
+
+// copyDevice copies one device path to another.
+func copyDevice(inputPath, outputPath string) error {
+	from, err := os.Open(inputPath)
+	if err != nil {
+		return errors.Wrapf(err, "Error opening file for reading: %s", inputPath)
+	}
+	defer from.Close()
+
+	to, err := os.OpenFile(outputPath, os.O_WRONLY, 0)
+	if err != nil {
+		return errors.Wrapf(err, "Error opening file writing: %s", outputPath)
+	}
+	defer to.Close()
+
+	_, err = io.Copy(to, from)
+	if err != nil {
+		return errors.Wrapf(err, "Error copying file '%s' to '%s'", inputPath, outputPath)
+	}
+
+	return nil
+}

From e55cecd1a8ebf332ec0ab23c218a413b367b36f7 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 15:24:10 +0000
Subject: [PATCH 11/18] lxd/device/disk: Allow VM disks to be updated

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/device/disk.go | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index 252a42b57c..a963ece965 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -397,11 +397,7 @@ func (d *disk) postStart() error {
 
 // Update applies configuration changes to a started device.
 func (d *disk) Update(oldDevices deviceConfig.Devices, isRunning bool) error {
-	if d.inst.Type() == instancetype.VM {
-		if shared.IsRootDiskDevice(d.config) {
-			return nil
-		}
-
+	if d.inst.Type() == instancetype.VM && !shared.IsRootDiskDevice(d.config) {
 		return fmt.Errorf("Non-root disks not supported for VMs")
 	}
 

From fb69aa386355437e3ba0408b4ab29d9121ed57f6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 15:56:25 +0000
Subject: [PATCH 12/18] lxd/storage: Updates storageRootFSApplyQuota to support
 VMs

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage.go | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 3351aed653..971465b28a 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -925,18 +925,18 @@ func storageVolumeUmount(state *state.State, poolName string, volumeName string,
 // storageRootFSApplyQuota applies a quota to an instance if it can, if it cannot then it will
 // return false indicating that the quota needs to be stored in volatile to be applied on next boot.
 func storageRootFSApplyQuota(state *state.State, inst instance.Instance, size string) error {
-	c, ok := inst.(*containerLXC)
-	if !ok {
-		return fmt.Errorf("Received non-LXC container instance")
-	}
-
-	pool, err := storagePools.GetPoolByInstance(state, c)
+	pool, err := storagePools.GetPoolByInstance(state, inst)
 	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		err = pool.SetInstanceQuota(c, size, nil)
+		err = pool.SetInstanceQuota(inst, size, nil)
 		if err != nil {
 			return err
 		}
 	} else {
+		c, ok := inst.(*containerLXC)
+		if !ok {
+			return fmt.Errorf("Received non-LXC container instance")
+		}
+
 		err := c.initStorage()
 		if err != nil {
 			return errors.Wrap(err, "Initialize storage")

From be99d33667d99fc26f12ea6fc5a9683c0c24e197 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 17:16:55 +0000
Subject: [PATCH 13/18] lxd/container: Adds VM support to
 instanceCreateAsSnapshot

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 1aac67580c..91332d2514 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -606,10 +606,6 @@ func instanceCreateAsCopy(s *state.State, args db.InstanceArgs, sourceInst insta
 }
 
 func instanceCreateAsSnapshot(s *state.State, args db.InstanceArgs, sourceInstance instance.Instance, op *operations.Operation) (instance.Instance, error) {
-	if sourceInstance.Type() != instancetype.Container {
-		return nil, fmt.Errorf("Instance is not container type")
-	}
-
 	if sourceInstance.Type() != args.Type {
 		return nil, fmt.Errorf("Source instance and snapshot instance types do not match")
 	}

From 8cf686e373a4c494d3eb0c625da144d9991143e8 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 17:17:10 +0000
Subject: [PATCH 14/18] lxd/container/snapshot: Adds VM support to
 containerSnapshotHandler

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_snapshot.go | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index 81a1fb402b..374f319e4d 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -14,7 +14,6 @@ import (
 
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/operations"
 	"github.com/lxc/lxd/lxd/response"
 	"github.com/lxc/lxd/lxd/util"
@@ -215,10 +214,6 @@ func containerSnapshotHandler(d *Daemon, r *http.Request) response.Response {
 		return response.SmartError(err)
 	}
 
-	if inst.Type() != instancetype.Container {
-		return response.SmartError(fmt.Errorf("Instance is not container type"))
-	}
-
 	switch r.Method {
 	case "GET":
 		return snapshotGet(inst, snapshotName)

From a00327d008c683e9e877437e12fbb9c61fcbef0f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 17:24:12 +0000
Subject: [PATCH 15/18] lxd/migration/migration/volumes: Fixes crash when
 storage driver has no transfer methods

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/migration/migration_volumes.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/lxd/migration/migration_volumes.go b/lxd/migration/migration_volumes.go
index 4ccb7e8944..aff1691510 100644
--- a/lxd/migration/migration_volumes.go
+++ b/lxd/migration/migration_volumes.go
@@ -49,7 +49,12 @@ type VolumeTargetArgs struct {
 func TypesToHeader(types ...Type) MigrationHeader {
 	missingFeature := false
 	hasFeature := true
-	preferredType := types[0]
+	var preferredType Type
+
+	if len(types) > 0 {
+		preferredType = types[0]
+	}
+
 	header := MigrationHeader{Fs: &preferredType.FSType}
 
 	// Add ZFS features if preferred type is ZFS.

From b6e0b61750db45db1565b652930507d6b98b2fec Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 17:34:53 +0000
Subject: [PATCH 16/18] lxd/storage/drivers/driver/common: Adds VM support for
 migration types

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/driver_common.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/lxd/storage/drivers/driver_common.go b/lxd/storage/drivers/driver_common.go
index 6f8f9e88c4..92057b3ef5 100644
--- a/lxd/storage/drivers/driver_common.go
+++ b/lxd/storage/drivers/driver_common.go
@@ -94,10 +94,6 @@ func (d *common) validateVolume(vol Volume, driverRules map[string]func(value st
 // MigrationType returns the type of transfer methods to be used when doing migrations between pools
 // in preference order.
 func (d *common) MigrationTypes(contentType ContentType, refresh bool) []migration.Type {
-	if contentType != ContentTypeFS {
-		return nil
-	}
-
 	return []migration.Type{
 		{
 			FSType:   migration.MigrationFSType_RSYNC,

From bd707dc1dfe2b0d477a6c9b6a69e523e668ab9d9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 12:03:41 +0000
Subject: [PATCH 17/18] lxd/storage/drivers/driver/lvm: Adds VM support

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/drivers_lvm.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/storage/drivers/drivers_lvm.go b/lxd/storage/drivers/drivers_lvm.go
index d9abc978c1..fab341abd9 100644
--- a/lxd/storage/drivers/drivers_lvm.go
+++ b/lxd/storage/drivers/drivers_lvm.go
@@ -78,7 +78,7 @@ func (d *lvm) Info() Info {
 		OptimizedImages:       d.usesThinpool(), // Only thinpool pools support optimized images.
 		PreservesInodes:       !d.state.OS.RunningInUserNS,
 		Remote:                false,
-		VolumeTypes:           []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer},
+		VolumeTypes:           []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},
 		BlockBacking:          true,
 		RunningQuotaResize:    false,
 		RunningSnapshotFreeze: false,

From bb6f7c3b42cf7dbb3c7443cb70f4ef5211549947 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 13 Jan 2020 11:46:46 +0000
Subject: [PATCH 18/18] lxd/storage/drivers/drivers/lvm/volumes: Adds VM
 support

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/drivers_lvm_volumes.go | 190 ++++++++++++++++-----
 1 file changed, 144 insertions(+), 46 deletions(-)

diff --git a/lxd/storage/drivers/drivers_lvm_volumes.go b/lxd/storage/drivers/drivers_lvm_volumes.go
index a70b016177..7ba0fd9639 100644
--- a/lxd/storage/drivers/drivers_lvm_volumes.go
+++ b/lxd/storage/drivers/drivers_lvm_volumes.go
@@ -40,15 +40,16 @@ func (d *lvm) HasVolume(vol Volume) bool {
 
 // GetVolumeDiskPath returns the location of a disk volume.
 func (d *lvm) GetVolumeDiskPath(vol Volume) (string, error) {
-	return "", ErrNotImplemented
+	if vol.IsVMBlock() {
+		volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+		return volDevPath, nil
+	}
+
+	return "", fmt.Errorf("No disk paths for filesystems")
 }
 
 // CreateVolume creates an empty volume and can optionally fill it by executing the supplied filler function.
 func (d *lvm) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error {
-	if vol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
-	}
-
 	revert := revert.New()
 	defer revert.Fail()
 
@@ -65,13 +66,39 @@ func (d *lvm) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Oper
 	}
 	revert.Add(func() { d.DeleteVolume(vol, op) })
 
+	// For VMs, also create the filesystem volume.
+	if vol.IsVMBlock() {
+		fsVol := vol.NewVMBlockFilesystemVolume()
+		err := d.CreateVolume(fsVol, nil, op)
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() { d.DeleteVolume(fsVol, op) })
+	}
+
+	// Run the volume filler function if supplied.
 	if filler != nil && filler.Fill != nil {
 		err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
-			// Run the volume filler function if supplied.
-			d.logger.Debug("Running filler function")
-			err = filler.Fill(mountPath, "")
-			if err != nil {
-				return err
+			if vol.contentType == ContentTypeFS {
+				d.logger.Debug("Running filler function", log.Ctx{"path": volPath})
+				err = filler.Fill(mountPath, "")
+				if err != nil {
+					return err
+				}
+			} else {
+				// Get the device path.
+				devPath, err := d.GetVolumeDiskPath(vol)
+				if err != nil {
+					return err
+				}
+
+				// Run the filler.
+				d.logger.Debug("Running filler function", log.Ctx{"dev": devPath, "path": volPath})
+				err = filler.Fill(mountPath, devPath)
+				if err != nil {
+					return err
+				}
 			}
 
 			// Run EnsureMountPath again after mounting to ensure the mount directory
@@ -123,10 +150,6 @@ func (d *lvm) CreateVolumeFromCopy(vol, srcVol Volume, copySnapshots bool, op *o
 	var err error
 	var srcSnapshots []Volume
 
-	if vol.contentType != ContentTypeFS || srcVol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
-	}
-
 	if copySnapshots && !srcVol.IsSnapshot() {
 		// Get the list of snapshots from the source.
 		srcSnapshots, err = srcVol.Snapshots(op)
@@ -137,7 +160,19 @@ func (d *lvm) CreateVolumeFromCopy(vol, srcVol Volume, copySnapshots bool, op *o
 
 	// We can use optimised copying when the pool is backed by an LVM thinpool.
 	if d.usesThinpool() {
-		return d.copyThinpoolVolume(vol, srcVol, srcSnapshots, false)
+		err = d.copyThinpoolVolume(vol, srcVol, srcSnapshots, false)
+		if err != nil {
+			return err
+		}
+
+		// For VMs, also copy the filesystem volume.
+		if vol.IsVMBlock() {
+			srcFSVol := srcVol.NewVMBlockFilesystemVolume()
+			fsVol := vol.NewVMBlockFilesystemVolume()
+			return d.copyThinpoolVolume(fsVol, srcFSVol, srcSnapshots, false)
+		}
+
+		return nil
 	}
 
 	// Otherwise run the generic copy.
@@ -146,10 +181,6 @@ func (d *lvm) CreateVolumeFromCopy(vol, srcVol Volume, copySnapshots bool, op *o
 
 // RefreshVolume provides same-pool volume and specific snapshots syncing functionality.
 func (d *lvm) RefreshVolume(vol, srcVol Volume, srcSnapshots []Volume, op *operations.Operation) error {
-	if vol.contentType != ContentTypeFS || srcVol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
-	}
-
 	// We can use optimised copying when the pool is backed by an LVM thinpool.
 	if d.usesThinpool() {
 		return d.copyThinpoolVolume(vol, srcVol, srcSnapshots, true)
@@ -218,14 +249,16 @@ func (d *lvm) RenameVolume(vol Volume, newVolName string, op *operations.Operati
 		}
 
 		// Rename snapshots dir if present.
-		srcSnapshotDir := GetVolumeSnapshotDir(d.name, vol.volType, vol.name)
-		dstSnapshotDir := GetVolumeSnapshotDir(d.name, vol.volType, newVolName)
-		if shared.PathExists(srcSnapshotDir) {
-			err = os.Rename(srcSnapshotDir, dstSnapshotDir)
-			if err != nil {
-				return errors.Wrapf(err, "Error renaming LVM logical volume snapshot directory from '%s' to '%s'", srcSnapshotDir, dstSnapshotDir)
+		if vol.contentType == ContentTypeFS {
+			srcSnapshotDir := GetVolumeSnapshotDir(d.name, vol.volType, vol.name)
+			dstSnapshotDir := GetVolumeSnapshotDir(d.name, vol.volType, newVolName)
+			if shared.PathExists(srcSnapshotDir) {
+				err = os.Rename(srcSnapshotDir, dstSnapshotDir)
+				if err != nil {
+					return errors.Wrapf(err, "Error renaming LVM logical volume snapshot directory from '%s' to '%s'", srcSnapshotDir, dstSnapshotDir)
+				}
+				revert.Add(func() { os.Rename(dstSnapshotDir, srcSnapshotDir) })
 			}
-			revert.Add(func() { os.Rename(dstSnapshotDir, srcSnapshotDir) })
 		}
 
 		// Rename actual volume.
@@ -237,13 +270,24 @@ func (d *lvm) RenameVolume(vol Volume, newVolName string, op *operations.Operati
 		revert.Add(func() { d.renameLogicalVolume(newVolDevPath, volDevPath) })
 
 		// Rename volume dir.
-		srcVolumePath := GetVolumeMountPath(d.name, vol.volType, vol.name)
-		dstVolumePath := GetVolumeMountPath(d.name, vol.volType, newVolName)
-		err = os.Rename(srcVolumePath, dstVolumePath)
-		if err != nil {
-			return errors.Wrapf(err, "Error renaming LVM logical volume mount path from '%s' to '%s'", srcVolumePath, dstVolumePath)
+		if vol.contentType == ContentTypeFS {
+			srcVolumePath := GetVolumeMountPath(d.name, vol.volType, vol.name)
+			dstVolumePath := GetVolumeMountPath(d.name, vol.volType, newVolName)
+			err = os.Rename(srcVolumePath, dstVolumePath)
+			if err != nil {
+				return errors.Wrapf(err, "Error renaming LVM logical volume mount path from '%s' to '%s'", srcVolumePath, dstVolumePath)
+			}
+			revert.Add(func() { os.Rename(dstVolumePath, srcVolumePath) })
+		}
+
+		// For VMs, also rename the filesystem volume.
+		if vol.IsVMBlock() {
+			fsVol := vol.NewVMBlockFilesystemVolume()
+			err = d.RenameVolume(fsVol, newVolName, op)
+			if err != nil {
+				return err
+			}
 		}
-		revert.Add(func() { os.Rename(dstVolumePath, srcVolumePath) })
 
 		revert.Success()
 		return nil
@@ -373,9 +417,11 @@ func (d *lvm) DeleteVolume(vol Volume, op *operations.Operation) error {
 	}
 
 	if lvExists {
-		_, err = d.UnmountVolume(vol, op)
-		if err != nil {
-			return fmt.Errorf("Error unmounting LVM logical volume: %v", err)
+		if vol.contentType == ContentTypeFS {
+			_, err = d.UnmountVolume(vol, op)
+			if err != nil {
+				return fmt.Errorf("Error unmounting LVM logical volume: %v", err)
+			}
 		}
 
 		err = d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name))
@@ -384,18 +430,29 @@ func (d *lvm) DeleteVolume(vol Volume, op *operations.Operation) error {
 		}
 	}
 
-	// Remove the volume from the storage device.
-	mountPath := vol.MountPath()
-	err = os.RemoveAll(mountPath)
-	if err != nil {
-		return errors.Wrapf(err, "Error removing LVM logical volume mount path '%s'", mountPath)
+	if vol.contentType == ContentTypeFS {
+		// Remove the volume from the storage device.
+		mountPath := vol.MountPath()
+		err = os.RemoveAll(mountPath)
+		if err != nil {
+			return errors.Wrapf(err, "Error removing LVM logical volume mount path '%s'", mountPath)
+		}
+
+		// Although the volume snapshot directory should already be removed, let's remove it here just in case
+		// the top-level directory is left behind.
+		err = deleteParentSnapshotDirIfEmpty(d.name, vol.volType, vol.name)
+		if err != nil {
+			return err
+		}
 	}
 
-	// Although the volume snapshot directory should already be removed, lets remove it here to just in case
-	// the top-level directory is left.
-	err = deleteParentSnapshotDirIfEmpty(d.name, vol.volType, vol.name)
-	if err != nil {
-		return err
+	// For VMs, also delete the filesystem volume.
+	if vol.IsVMBlock() {
+		fsVol := vol.NewVMBlockFilesystemVolume()
+		err := d.DeleteVolume(fsVol, op)
+		if err != nil {
+			return err
+		}
 	}
 
 	return nil
@@ -419,6 +476,12 @@ func (d *lvm) MountVolume(vol Volume, op *operations.Operation) (bool, error) {
 		return true, nil
 	}
 
+	// For VMs, mount the filesystem volume.
+	if vol.IsVMBlock() {
+		fsVol := vol.NewVMBlockFilesystemVolume()
+		return d.MountVolume(fsVol, op)
+	}
+
 	return false, nil
 }
 
@@ -427,7 +490,7 @@ func (d *lvm) MountVolumeSnapshot(snapVol Volume, op *operations.Operation) (boo
 	mountPath := snapVol.MountPath()
 
 	// Check if already mounted.
-	if !shared.IsMountPoint(mountPath) {
+	if snapVol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
 		revert := revert.New()
 		defer revert.Fail()
 
@@ -483,6 +546,12 @@ func (d *lvm) MountVolumeSnapshot(snapVol Volume, op *operations.Operation) (boo
 		return true, nil
 	}
 
+	// For VMs, mount the filesystem volume.
+	if snapVol.IsVMBlock() {
+		fsVol := snapVol.NewVMBlockFilesystemVolume()
+		return d.MountVolumeSnapshot(fsVol, op)
+	}
+
 	return false, nil
 }
 
@@ -605,6 +674,10 @@ func (d *lvm) SetVolumeQuota(vol Volume, size string, op *operations.Operation)
 			d.logger.Debug("Logical volume filesystem grown", logCtx)
 		}
 	} else {
+		if newSizeBytes < oldSizeBytes {
+			return fmt.Errorf("You cannot shrink block volumes")
+		}
+
 		err = d.resizeLogicalVolume(volDevPath, newSizeBytes)
 		if err != nil {
 			return err
@@ -642,6 +715,22 @@ func (d *lvm) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) err
 		return fmt.Errorf("Error creating LVM logical volume snapshot: %v", err)
 	}
 
+	volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], snapVol.volType, snapVol.contentType, snapVol.name)
+
+	revert.Add(func() {
+		d.removeLogicalVolume(volDevPath)
+	})
+
+	// For VMs, also snapshot the filesystem.
+	if snapVol.IsVMBlock() {
+		parentFSVol := parentVol.NewVMBlockFilesystemVolume()
+		fsVol := snapVol.NewVMBlockFilesystemVolume()
+		_, err = d.createLogicalVolumeSnapshot(d.config["lvm.vg_name"], parentFSVol, fsVol, true, d.usesThinpool())
+		if err != nil {
+			return fmt.Errorf("Error creating LVM logical volume snapshot: %v", err)
+		}
+	}
+
 	revert.Success()
 	return nil
 }
@@ -668,6 +757,15 @@ func (d *lvm) DeleteVolumeSnapshot(snapVol Volume, op *operations.Operation) err
 		}
 	}
 
+	// For VMs, also remove the snapshot filesystem volume.
+	if snapVol.IsVMBlock() {
+		fsVol := snapVol.NewVMBlockFilesystemVolume()
+		err = d.DeleteVolumeSnapshot(fsVol, op)
+		if err != nil {
+			return err
+		}
+	}
+
 	// Remove the snapshot mount path from the storage device.
 	snapPath := snapVol.MountPath()
 	err = os.RemoveAll(snapPath)


More information about the lxc-devel mailing list