[lxc-devel] [lxd/master] Storage: Removes usage of old storage pool loader

tomponline on Github lxc-bot at linuxcontainers.org
Mon Feb 24 14:43:49 UTC 2020


From 188dee17459da51d7b833f4f7d6f4bf7349d5a35 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 5 Dec 2019 15:56:33 -0500
Subject: [PATCH 01/36] lxd/storage: Remove legacy dir implementation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/patches.go           |   29 +-
 lxd/patches_utils.go     |   39 +
 lxd/storage.go           |   27 +-
 lxd/storage_dir.go       | 1589 --------------------------------------
 lxd/storage_migration.go |  155 ++--
 5 files changed, 95 insertions(+), 1744 deletions(-)
 create mode 100644 lxd/patches_utils.go
 delete mode 100644 lxd/storage_dir.go

diff --git a/lxd/patches.go b/lxd/patches.go
index dbb5050316..d2400b1763 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -329,25 +329,20 @@ func patchStorageApi(name string, d *Daemon) error {
 	lvmVgName := daemonConfig["storage.lvm_vg_name"]
 	zfsPoolName := daemonConfig["storage.zfs_pool_name"]
 	defaultPoolName := "default"
-	preStorageApiStorageType := storageTypeDir
+	preStorageApiStorageType := "dir"
 
 	if lvmVgName != "" {
-		preStorageApiStorageType = storageTypeLvm
+		preStorageApiStorageType = "lvm"
 		defaultPoolName = lvmVgName
 	} else if zfsPoolName != "" {
-		preStorageApiStorageType = storageTypeZfs
+		preStorageApiStorageType = "zfs"
 		defaultPoolName = zfsPoolName
 	} else if d.os.BackingFS == "btrfs" {
-		preStorageApiStorageType = storageTypeBtrfs
+		preStorageApiStorageType = "btrfs"
 	} else {
 		// Dir storage pool.
 	}
 
-	defaultStorageTypeName, err := storageTypeToString(preStorageApiStorageType)
-	if err != nil {
-		return err
-	}
-
 	// In case we detect that an lvm name or a zfs name exists it makes
 	// sense to create a storage pool in the database, independent of
 	// whether anything currently exists on that pool. We can still probably
@@ -392,13 +387,13 @@ func patchStorageApi(name string, d *Daemon) error {
 	// If any of these are actually called, there's no way back.
 	poolName := defaultPoolName
 	switch preStorageApiStorageType {
-	case storageTypeBtrfs:
-		err = upgradeFromStorageTypeBtrfs(name, d, defaultPoolName, defaultStorageTypeName, cRegular, cSnapshots, imgPublic, imgPrivate)
-	case storageTypeDir:
-		err = upgradeFromStorageTypeDir(name, d, defaultPoolName, defaultStorageTypeName, cRegular, cSnapshots, imgPublic, imgPrivate)
-	case storageTypeLvm:
-		err = upgradeFromStorageTypeLvm(name, d, defaultPoolName, defaultStorageTypeName, cRegular, cSnapshots, imgPublic, imgPrivate)
-	case storageTypeZfs:
+	case "btrfs":
+		err = upgradeFromStorageTypeBtrfs(name, d, defaultPoolName, preStorageApiStorageType, cRegular, cSnapshots, imgPublic, imgPrivate)
+	case "dir":
+		err = upgradeFromStorageTypeDir(name, d, defaultPoolName, preStorageApiStorageType, cRegular, cSnapshots, imgPublic, imgPrivate)
+	case "lvm":
+		err = upgradeFromStorageTypeLvm(name, d, defaultPoolName, preStorageApiStorageType, cRegular, cSnapshots, imgPublic, imgPrivate)
+	case "zfs":
 		// The user is using a zfs dataset. This case needs to be
 		// handled with care:
 
@@ -410,7 +405,7 @@ func patchStorageApi(name string, d *Daemon) error {
 		if strings.Contains(defaultPoolName, "/") {
 			poolName = "default"
 		}
-		err = upgradeFromStorageTypeZfs(name, d, defaultPoolName, defaultStorageTypeName, cRegular, []string{}, imgPublic, imgPrivate)
+		err = upgradeFromStorageTypeZfs(name, d, defaultPoolName, preStorageApiStorageType, cRegular, []string{}, imgPublic, imgPrivate)
 	default: // Shouldn't happen.
 		return fmt.Errorf("Invalid storage type. Upgrading not possible")
 	}
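
The patches.go hunks above replace the storageType enum with plain driver-name strings in the upgrade path, which also removes the storageTypeToString round-trip. Below is a minimal, self-contained sketch of the resulting selection logic; the function and parameter names are illustrative, not part of the patch:

package main

import "fmt"

// preStorageAPIDriver mirrors the string-based selection in patchStorageApi:
// LVM takes priority (and names the default pool), then ZFS, then a btrfs
// backing filesystem, and finally the plain dir driver.
func preStorageAPIDriver(lvmVgName, zfsPoolName, backingFS string) (string, string) {
	driver, pool := "dir", "default"
	if lvmVgName != "" {
		driver, pool = "lvm", lvmVgName
	} else if zfsPoolName != "" {
		driver, pool = "zfs", zfsPoolName
	} else if backingFS == "btrfs" {
		driver = "btrfs"
	}
	return driver, pool
}

func main() {
	fmt.Println(preStorageAPIDriver("", "tank", "ext4")) // zfs tank
}
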
diff --git a/lxd/patches_utils.go b/lxd/patches_utils.go
new file mode 100644
index 0000000000..01429d1a59
--- /dev/null
+++ b/lxd/patches_utils.go
@@ -0,0 +1,39 @@
+package main
+
+import (
+	"os"
+
+	"github.com/lxc/lxd/lxd/project"
+	driver "github.com/lxc/lxd/lxd/storage"
+	"github.com/lxc/lxd/shared"
+)
+
+func dirSnapshotDeleteInternal(projectName, poolName string, snapshotName string) error {
+	snapshotContainerMntPoint := driver.GetSnapshotMountPoint(projectName, poolName, snapshotName)
+	if shared.PathExists(snapshotContainerMntPoint) {
+		err := os.RemoveAll(snapshotContainerMntPoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	sourceContainerName, _, _ := shared.InstanceGetParentAndSnapshotName(snapshotName)
+	snapshotContainerPath := driver.GetSnapshotMountPoint(projectName, poolName, sourceContainerName)
+	empty, _ := shared.PathIsEmpty(snapshotContainerPath)
+	if empty {
+		err := os.Remove(snapshotContainerPath)
+		if err != nil {
+			return err
+		}
+
+		snapshotSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceContainerName))
+		if shared.PathExists(snapshotSymlink) {
+			err := os.Remove(snapshotSymlink)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
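
The new patches_utils.go keeps dirSnapshotDeleteInternal available to the patch code after storage_dir.go is deleted below. It relies on LXD's convention that a snapshot name embeds its parent instance, separated by "/", which shared.InstanceGetParentAndSnapshotName splits apart. A minimal stand-in for that convention, assuming the "/" delimiter (the real helper lives in the shared package):

package main

import (
	"fmt"
	"strings"
)

// parentAndSnapshot is a hypothetical stand-in for
// shared.InstanceGetParentAndSnapshotName: it splits "<instance>/<snapshot>"
// and reports whether the name was a snapshot at all.
func parentAndSnapshot(name string) (string, string, bool) {
	parts := strings.SplitN(name, "/", 2)
	if len(parts) == 1 {
		return name, "", false
	}
	return parts[0], parts[1], true
}

func main() {
	parent, snap, ok := parentAndSnapshot("c1/snap0")
	fmt.Println(parent, snap, ok) // c1 snap0 true
}
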
diff --git a/lxd/storage.go b/lxd/storage.go
index 71d5b02a26..d78220e4cd 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -102,7 +102,6 @@ type storageType int
 const (
 	storageTypeBtrfs storageType = iota
 	storageTypeCeph
-	storageTypeDir
 	storageTypeLvm
 	storageTypeMock
 	storageTypeZfs
@@ -116,8 +115,6 @@ func storageTypeToString(sType storageType) (string, error) {
 		return "btrfs", nil
 	case storageTypeCeph:
 		return "ceph", nil
-	case storageTypeDir:
-		return "dir", nil
 	case storageTypeLvm:
 		return "lvm", nil
 	case storageTypeMock:
@@ -135,8 +132,6 @@ func storageStringToType(sName string) (storageType, error) {
 		return storageTypeBtrfs, nil
 	case "ceph":
 		return storageTypeCeph, nil
-	case "dir":
-		return storageTypeDir, nil
 	case "lvm":
 		return storageTypeLvm, nil
 	case "mock":
@@ -268,13 +263,6 @@ func storageCoreInit(driver string) (storage, error) {
 			return nil, err
 		}
 		return &btrfs, nil
-	case storageTypeDir:
-		dir := storageDir{}
-		err = dir.StorageCoreInit()
-		if err != nil {
-			return nil, err
-		}
-		return &dir, nil
 	case storageTypeCeph:
 		ceph := storageCeph{}
 		err = ceph.StorageCoreInit()
@@ -324,9 +312,8 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 
 	// Load the storage volume.
 	volume := &api.StorageVolume{}
-	volumeID := int64(-1)
 	if volumeName != "" {
-		volumeID, volume, err = s.Cluster.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, poolID)
+		_, volume, err = s.Cluster.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, poolID)
 		if err != nil {
 			return nil, err
 		}
@@ -349,18 +336,6 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 			return nil, err
 		}
 		return &btrfs, nil
-	case storageTypeDir:
-		dir := storageDir{}
-		dir.poolID = poolID
-		dir.pool = pool
-		dir.volume = volume
-		dir.volumeID = volumeID
-		dir.s = s
-		err = dir.StoragePoolInit()
-		if err != nil {
-			return nil, err
-		}
-		return &dir, nil
 	case storageTypeCeph:
 		ceph := storageCeph{}
 		ceph.poolID = poolID
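
With "dir" dropped from the storageType enum and from both mapping functions, storageStringToType("dir") now falls through to its default case, so any remaining caller gets an error instead of the legacy driver; dir pools are served exclusively by the new storage pool loader. A simplified, self-contained model of the mapping after this patch (the real functions switch over the enum and also return it):

package main

import "fmt"

// legacyStorageType models storageStringToType after the patch: "dir" is no
// longer a recognised legacy name; only the drivers still carried by the
// old loader are.
func legacyStorageType(name string) error {
	switch name {
	case "btrfs", "ceph", "lvm", "mock", "zfs":
		return nil
	}
	return fmt.Errorf("invalid storage type name: %s", name)
}

func main() {
	fmt.Println(legacyStorageType("zfs")) // <nil>
	fmt.Println(legacyStorageType("dir")) // invalid storage type name: dir
}
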
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
deleted file mode 100644
index 52e01e1814..0000000000
--- a/lxd/storage_dir.go
+++ /dev/null
@@ -1,1589 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/gorilla/websocket"
-	"github.com/pkg/errors"
-	"golang.org/x/sys/unix"
-
-	"github.com/lxc/lxd/lxd/backup"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/instance/instancetype"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/lxd/rsync"
-	driver "github.com/lxc/lxd/lxd/storage"
-	"github.com/lxc/lxd/lxd/storage/quota"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/ioprogress"
-	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/units"
-)
-
-type storageDir struct {
-	storageShared
-
-	volumeID int64
-}
-
-// Only initialize the minimal information we need about a given storage type.
-func (s *storageDir) StorageCoreInit() error {
-	s.sType = storageTypeDir
-	typeName, err := storageTypeToString(s.sType)
-	if err != nil {
-		return err
-	}
-	s.sTypeName = typeName
-	s.sTypeVersion = "1"
-
-	return nil
-}
-
-// Initialize a full storage interface.
-func (s *storageDir) StoragePoolInit() error {
-	err := s.StorageCoreInit()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Initialize a full storage interface.
-func (s *storageDir) StoragePoolCheck() error {
-	logger.Debugf("Checking DIR storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) StoragePoolCreate() error {
-	logger.Infof("Creating DIR storage pool \"%s\"", s.pool.Name)
-
-	s.pool.Config["volatile.initial_source"] = s.pool.Config["source"]
-
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-
-	source := shared.HostPath(s.pool.Config["source"])
-	if source == "" {
-		source = filepath.Join(shared.VarPath("storage-pools"), s.pool.Name)
-		s.pool.Config["source"] = source
-	} else {
-		cleanSource := filepath.Clean(source)
-		lxdDir := shared.VarPath()
-		if strings.HasPrefix(cleanSource, lxdDir) &&
-			cleanSource != poolMntPoint {
-			return fmt.Errorf(`DIR storage pool requests in LXD `+
-				`directory "%s" are only valid under `+
-				`"%s"\n(e.g. source=%s)`, shared.VarPath(),
-				shared.VarPath("storage-pools"), poolMntPoint)
-		}
-		source = filepath.Clean(source)
-	}
-
-	revert := true
-	if !shared.PathExists(source) {
-		err := os.MkdirAll(source, 0711)
-		if err != nil {
-			return err
-		}
-		defer func() {
-			if !revert {
-				return
-			}
-			os.Remove(source)
-		}()
-	} else {
-		empty, err := shared.PathIsEmpty(source)
-		if err != nil {
-			return err
-		}
-
-		if !empty {
-			return fmt.Errorf("The provided directory is not empty")
-		}
-	}
-
-	if !shared.PathExists(poolMntPoint) {
-		err := os.MkdirAll(poolMntPoint, 0711)
-		if err != nil {
-			return err
-		}
-		defer func() {
-			if !revert {
-				return
-			}
-			os.Remove(poolMntPoint)
-		}()
-	}
-
-	err := s.StoragePoolCheck()
-	if err != nil {
-		return err
-	}
-
-	_, err = s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	revert = false
-
-	logger.Infof("Created DIR storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) StoragePoolDelete() error {
-	logger.Infof("Deleting DIR storage pool \"%s\"", s.pool.Name)
-
-	source := shared.HostPath(s.pool.Config["source"])
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	_, err := s.StoragePoolUmount()
-	if err != nil {
-		return err
-	}
-
-	if shared.PathExists(source) {
-		err := os.RemoveAll(source)
-		if err != nil {
-			return err
-		}
-	}
-
-	prefix := shared.VarPath("storage-pools")
-	if !strings.HasPrefix(source, prefix) {
-		storagePoolSymlink := driver.GetStoragePoolMountPoint(s.pool.Name)
-		if !shared.PathExists(storagePoolSymlink) {
-			return nil
-		}
-
-		err := os.Remove(storagePoolSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Infof("Deleted DIR storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) StoragePoolMount() (bool, error) {
-	source := shared.HostPath(s.pool.Config["source"])
-	if source == "" {
-		return false, fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-	cleanSource := filepath.Clean(source)
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-	if cleanSource == poolMntPoint {
-		return true, nil
-	}
-
-	logger.Debugf("Mounting DIR storage pool \"%s\"", s.pool.Name)
-
-	poolMountLockID := getPoolMountLockID(s.pool.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in mounting the storage pool.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[poolMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	removeLockFromMap := func() {
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, poolMountLockID)
-		}
-		lxdStorageMapLock.Unlock()
-	}
-	defer removeLockFromMap()
-
-	mountSource := cleanSource
-	mountFlags := unix.MS_BIND
-
-	if shared.IsMountPoint(poolMntPoint) {
-		return false, nil
-	}
-
-	err := unix.Mount(mountSource, poolMntPoint, "", uintptr(mountFlags), "")
-	if err != nil {
-		logger.Errorf(`Failed to mount DIR storage pool "%s" onto "%s": %s`, mountSource, poolMntPoint, err)
-		return false, err
-	}
-
-	logger.Debugf("Mounted DIR storage pool \"%s\"", s.pool.Name)
-
-	return true, nil
-}
-
-func (s *storageDir) StoragePoolUmount() (bool, error) {
-	source := s.pool.Config["source"]
-	if source == "" {
-		return false, fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-	cleanSource := filepath.Clean(source)
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-	if cleanSource == poolMntPoint {
-		return true, nil
-	}
-
-	logger.Debugf("Unmounting DIR storage pool \"%s\"", s.pool.Name)
-
-	poolUmountLockID := getPoolUmountLockID(s.pool.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[poolUmountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in unmounting the storage pool.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[poolUmountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	removeLockFromMap := func() {
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[poolUmountLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, poolUmountLockID)
-		}
-		lxdStorageMapLock.Unlock()
-	}
-
-	defer removeLockFromMap()
-
-	if !shared.IsMountPoint(poolMntPoint) {
-		return false, nil
-	}
-
-	err := unix.Unmount(poolMntPoint, unix.MNT_DETACH)
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Unmounted DIR pool \"%s\"", s.pool.Name)
-	return true, nil
-}
-
-func (s *storageDir) GetContainerPoolInfo() (int64, string, string) {
-	return s.poolID, s.pool.Name, s.pool.Name
-}
-
-func (s *storageDir) StoragePoolUpdate(writable *api.StoragePoolPut, changedConfig []string) error {
-	logger.Infof(`Updating DIR storage pool "%s"`, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	changeable := changeableStoragePoolProperties["dir"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolError(unchangeable, "dir")
-	}
-
-	// "rsync.bwlimit" requires no on-disk modifications.
-
-	logger.Infof(`Updated DIR storage pool "%s"`, s.pool.Name)
-	return nil
-}
-
-// Functions dealing with storage pools.
-func (s *storageDir) StoragePoolVolumeCreate() error {
-	logger.Infof("Creating DIR storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	isSnapshot := shared.IsSnapshot(s.volume.Name)
-
-	var storageVolumePath string
-
-	if isSnapshot {
-		storageVolumePath = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	} else {
-		storageVolumePath = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	}
-
-	err = os.MkdirAll(storageVolumePath, 0711)
-	if err != nil {
-		return err
-	}
-
-	err = s.initQuota(storageVolumePath, s.volumeID)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Created DIR storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) StoragePoolVolumeDelete() error {
-	logger.Infof("Deleting DIR storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	storageVolumePath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	if !shared.PathExists(storageVolumePath) {
-		return nil
-	}
-
-	err := s.deleteQuota(storageVolumePath, s.volumeID)
-	if err != nil {
-		return err
-	}
-
-	err = os.RemoveAll(storageVolumePath)
-	if err != nil {
-		return err
-	}
-
-	err = s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		s.volume.Name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for DIR storage volume "%s" on storage pool "%s"`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	logger.Infof("Deleted DIR storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) StoragePoolVolumeMount() (bool, error) {
-	return true, nil
-}
-
-func (s *storageDir) StoragePoolVolumeUmount() (bool, error) {
-	return true, nil
-}
-
-func (s *storageDir) StoragePoolVolumeUpdate(writable *api.StorageVolumePut, changedConfig []string) error {
-	if writable.Restore == "" {
-		logger.Infof(`Updating DIR storage volume "%s"`, s.volume.Name)
-	}
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	if writable.Restore != "" {
-		logger.Infof(`Restoring DIR storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-
-		sourcePath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name,
-			fmt.Sprintf("%s/%s", s.volume.Name, writable.Restore))
-		targetPath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-		// Restore using rsync
-		bwlimit := s.pool.Config["rsync.bwlimit"]
-		output, err := rsync.LocalCopy(sourcePath, targetPath, bwlimit, true)
-		if err != nil {
-			return fmt.Errorf("Failed to rsync container: %s: %s", string(output), err)
-		}
-
-		logger.Infof(`Restored DIR storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-		return nil
-	}
-
-	changeable := changeableStoragePoolVolumeProperties["dir"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolVolumeError(unchangeable, "dir")
-	}
-
-	if shared.StringInSlice("size", changedConfig) {
-		if s.volume.Type != storagePoolVolumeTypeNameCustom {
-			return updateStoragePoolVolumeError([]string{"size"}, "dir")
-		}
-
-		if s.volume.Config["size"] != writable.Config["size"] {
-			size, err := units.ParseByteSizeString(writable.Config["size"])
-			if err != nil {
-				return err
-			}
-
-			err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	logger.Infof(`Updated DIR storage volume "%s"`, s.volume.Name)
-	return nil
-}
-
-func (s *storageDir) StoragePoolVolumeRename(newName string) error {
-	logger.Infof(`Renaming DIR storage volume on storage pool "%s" from "%s" to "%s"`,
-		s.pool.Name, s.volume.Name, newName)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	usedBy, err := storagePoolVolumeUsedByInstancesGet(s.s, "default", s.pool.Name, s.volume.Name)
-	if err != nil {
-		return err
-	}
-	if len(usedBy) > 0 {
-		return fmt.Errorf(`DIR storage volume "%s" on storage pool "%s" is attached to containers`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	oldPath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	newPath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, newName)
-	err = os.Rename(oldPath, newPath)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof(`Renamed DIR storage volume on storage pool "%s" from "%s" to "%s"`,
-		s.pool.Name, s.volume.Name, newName)
-
-	return s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, newName,
-		storagePoolVolumeTypeCustom, s.poolID)
-}
-
-func (s *storageDir) ContainerStorageReady(container instance.Instance) bool {
-	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	ok, _ := shared.PathIsEmpty(containerMntPoint)
-	return !ok
-}
-
-func (s *storageDir) ContainerCreate(container instance.Instance) error {
-	logger.Debugf("Creating empty DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	err = driver.CreateContainerMountpoint(containerMntPoint, container.Path(), container.IsPrivileged())
-	if err != nil {
-		return err
-	}
-	revert := true
-	defer func() {
-		if !revert {
-			return
-		}
-		deleteContainerMountpoint(containerMntPoint, container.Path(), s.GetStorageTypeName())
-	}()
-
-	err = s.initQuota(containerMntPoint, s.volumeID)
-	if err != nil {
-		return err
-	}
-
-	err = container.DeferTemplateApply("create")
-	if err != nil {
-		return errors.Wrap(err, "Apply template")
-	}
-
-	revert = false
-
-	logger.Debugf("Created empty DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) ContainerCreateFromImage(container instance.Instance, imageFingerprint string, tracker *ioprogress.ProgressTracker) error {
-	logger.Debugf("Creating DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	privileged := container.IsPrivileged()
-	containerName := container.Name()
-	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, containerName)
-	err = driver.CreateContainerMountpoint(containerMntPoint, container.Path(), privileged)
-	if err != nil {
-		return errors.Wrap(err, "Create container mount point")
-	}
-	revert := true
-	defer func() {
-		if !revert {
-			return
-		}
-		s.ContainerDelete(container)
-	}()
-
-	err = s.initQuota(containerMntPoint, s.volumeID)
-	if err != nil {
-		return err
-	}
-
-	imagePath := shared.VarPath("images", imageFingerprint)
-	err = driver.ImageUnpack(imagePath, containerMntPoint, "", false, s.s.OS.RunningInUserNS, nil)
-	if err != nil {
-		return errors.Wrap(err, "Unpack image")
-	}
-
-	err = container.DeferTemplateApply("create")
-	if err != nil {
-		return errors.Wrap(err, "Apply template")
-	}
-
-	revert = false
-
-	logger.Debugf("Created DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) ContainerDelete(container instance.Instance) error {
-	logger.Debugf("Deleting DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Delete the container on its storage pool:
-	// ${POOL}/containers/<container_name>
-	containerName := container.Name()
-	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, containerName)
-
-	err = s.deleteQuota(containerMntPoint, s.volumeID)
-	if err != nil {
-		return err
-	}
-
-	if shared.PathExists(containerMntPoint) {
-		err := os.RemoveAll(containerMntPoint)
-		if err != nil {
-			// RemoveAll fails on very long paths, so attempt an rm -Rf
-			_, err := shared.RunCommand("rm", "-Rf", containerMntPoint)
-			if err != nil {
-				return fmt.Errorf("error removing %s: %s", containerMntPoint, err)
-			}
-		}
-	}
-
-	err = deleteContainerMountpoint(containerMntPoint, container.Path(), s.GetStorageTypeName())
-	if err != nil {
-		return err
-	}
-
-	// Delete potential leftover snapshot mountpoints.
-	snapshotMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, container.Name())
-	if shared.PathExists(snapshotMntPoint) {
-		err := os.RemoveAll(snapshotMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Delete potential leftover snapshot symlinks:
-	// ${LXD_DIR}/snapshots/<container_name> to ${POOL}/snapshots/<container_name>
-	snapshotSymlink := shared.VarPath("snapshots", project.Prefix(container.Project(), container.Name()))
-	if shared.PathExists(snapshotSymlink) {
-		err := os.Remove(snapshotSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Deleted DIR storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) copyContainer(target instance.Instance, source instance.Instance) error {
-	if source.Type() != instancetype.Container {
-		return fmt.Errorf("Source Instance type must be container")
-	}
-
-	if target.Type() != instancetype.Container {
-		return fmt.Errorf("Target Instance type must be container")
-	}
-
-	srcCt := source.(*containerLXC)
-	targetCt := target.(*containerLXC)
-
-	_, sourcePool, _ := srcCt.Storage().GetContainerPoolInfo()
-	_, targetPool, _ := targetCt.Storage().GetContainerPoolInfo()
-	sourceContainerMntPoint := driver.GetContainerMountPoint(source.Project(), sourcePool, source.Name())
-	if source.IsSnapshot() {
-		sourceContainerMntPoint = driver.GetSnapshotMountPoint(source.Project(), sourcePool, source.Name())
-	}
-	targetContainerMntPoint := driver.GetContainerMountPoint(target.Project(), targetPool, target.Name())
-
-	err := driver.CreateContainerMountpoint(targetContainerMntPoint, target.Path(), target.IsPrivileged())
-	if err != nil {
-		return err
-	}
-
-	err = s.initQuota(targetContainerMntPoint, s.volumeID)
-	if err != nil {
-		return err
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	output, err := rsync.LocalCopy(sourceContainerMntPoint, targetContainerMntPoint, bwlimit, true)
-	if err != nil {
-		return fmt.Errorf("Failed to rsync container: %s: %s", string(output), err)
-	}
-
-	err = target.DeferTemplateApply("copy")
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageDir) copySnapshot(target instance.Instance, targetPool string, source instance.Instance, sourcePool string) error {
-	sourceName := source.Name()
-	targetName := target.Name()
-	sourceContainerMntPoint := driver.GetSnapshotMountPoint(source.Project(), sourcePool, sourceName)
-	targetContainerMntPoint := driver.GetSnapshotMountPoint(target.Project(), targetPool, targetName)
-
-	targetParentName, _, _ := shared.InstanceGetParentAndSnapshotName(target.Name())
-	containersPath := driver.GetSnapshotMountPoint(target.Project(), targetPool, targetParentName)
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", targetPool, "containers-snapshots", project.Prefix(target.Project(), targetParentName))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(target.Project(), targetParentName))
-	err := driver.CreateSnapshotMountpoint(containersPath, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-	if err != nil {
-		return err
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	output, err := rsync.LocalCopy(sourceContainerMntPoint, targetContainerMntPoint, bwlimit, true)
-	if err != nil {
-		return fmt.Errorf("Failed to rsync container: %s: %s", string(output), err)
-	}
-
-	return nil
-}
-
-func (s *storageDir) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
-	logger.Debugf("Copying DIR container storage %s to %s", source.Name(), target.Name())
-
-	err := s.doContainerCopy(target, source, containerOnly, false, nil)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Copied DIR container storage %s to %s", source.Name(), target.Name())
-	return nil
-}
-
-func (s *storageDir) doContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	ourStart, err := source.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer source.StorageStop()
-	}
-
-	sourcePool, err := source.StoragePool()
-	if err != nil {
-		return err
-	}
-	targetPool, err := target.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	srcState := s.s
-	if sourcePool != targetPool {
-		// setup storage for the source volume
-		srcStorage, err := storagePoolVolumeInit(s.s, "default", sourcePool, source.Name(), storagePoolVolumeTypeContainer)
-		if err != nil {
-			return err
-		}
-
-		ourMount, err := srcStorage.StoragePoolMount()
-		if err != nil {
-			return err
-		}
-		if ourMount {
-			defer srcStorage.StoragePoolUmount()
-		}
-		srcState = srcStorage.GetState()
-	}
-
-	err = s.copyContainer(target, source)
-	if err != nil {
-		return err
-	}
-
-	if containerOnly {
-		return nil
-	}
-
-	var snapshots []instance.Instance
-
-	if refresh {
-		snapshots = refreshSnapshots
-	} else {
-		snapshots, err = source.Snapshots()
-		if err != nil {
-			return err
-		}
-	}
-
-	if len(snapshots) == 0 {
-		return nil
-	}
-
-	for _, snap := range snapshots {
-		sourceSnapshot, err := instance.LoadByProjectAndName(srcState, source.Project(), snap.Name())
-		if err != nil {
-			return err
-		}
-
-		_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-		newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
-		targetSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), newSnapName)
-		if err != nil {
-			return err
-		}
-
-		err = s.copySnapshot(targetSnapshot, targetPool, sourceSnapshot, sourcePool)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (s *storageDir) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
-	logger.Debugf("Refreshing DIR container storage for %s from %s", target.Name(), source.Name())
-
-	err := s.doContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Refreshed DIR container storage for %s from %s", target.Name(), source.Name())
-	return nil
-}
-
-func (s *storageDir) ContainerMount(c instance.Instance) (bool, error) {
-	return s.StoragePoolMount()
-}
-
-func (s *storageDir) ContainerUmount(c instance.Instance, path string) (bool, error) {
-	return true, nil
-}
-
-func (s *storageDir) ContainerRename(container instance.Instance, newName string) error {
-	logger.Debugf("Renaming DIR storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	oldContainerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	oldContainerSymlink := shared.VarPath("containers", project.Prefix(container.Project(), container.Name()))
-	newContainerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, newName)
-	newContainerSymlink := shared.VarPath("containers", project.Prefix(container.Project(), newName))
-	err = renameContainerMountpoint(oldContainerMntPoint, oldContainerSymlink, newContainerMntPoint, newContainerSymlink)
-	if err != nil {
-		return err
-	}
-
-	// Rename the snapshot mountpoint for the container if existing:
-	// ${POOL}/snapshots/<old_container_name> to ${POOL}/snapshots/<new_container_name>
-	oldSnapshotsMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, container.Name())
-	newSnapshotsMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, newName)
-	if shared.PathExists(oldSnapshotsMntPoint) {
-		err = os.Rename(oldSnapshotsMntPoint, newSnapshotsMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Remove the old snapshot symlink:
-	// ${LXD_DIR}/snapshots/<old_container_name>
-	oldSnapshotSymlink := shared.VarPath("snapshots", project.Prefix(container.Project(), container.Name()))
-	newSnapshotSymlink := shared.VarPath("snapshots", project.Prefix(container.Project(), newName))
-	if shared.PathExists(oldSnapshotSymlink) {
-		err := os.Remove(oldSnapshotSymlink)
-		if err != nil {
-			return err
-		}
-
-		// Create the new snapshot symlink:
-		// ${LXD_DIR}/snapshots/<new_container_name> to ${POOL}/snapshots/<new_container_name>
-		err = os.Symlink(newSnapshotsMntPoint, newSnapshotSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Renamed DIR storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-	return nil
-}
-
-func (s *storageDir) ContainerRestore(container instance.Instance, sourceContainer instance.Instance) error {
-	logger.Debugf("Restoring DIR storage volume for container \"%s\" from %s to %s", s.volume.Name, sourceContainer.Name(), container.Name())
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	targetPath := container.Path()
-	sourcePath := sourceContainer.Path()
-
-	// Restore using rsync
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	output, err := rsync.LocalCopy(sourcePath, targetPath, bwlimit, true)
-	if err != nil {
-		return fmt.Errorf("Failed to rsync container: %s: %s", string(output), err)
-	}
-
-	logger.Debugf("Restored DIR storage volume for container \"%s\" from %s to %s", s.volume.Name, sourceContainer.Name(), container.Name())
-	return nil
-}
-
-func (s *storageDir) ContainerGetUsage(c instance.Instance) (int64, error) {
-	path := driver.GetContainerMountPoint(c.Project(), s.pool.Name, c.Name())
-
-	ok, err := quota.Supported(path)
-	if err != nil || !ok {
-		return -1, fmt.Errorf("The backing filesystem doesn't support quotas")
-	}
-
-	projectID := uint32(s.volumeID + 10000)
-	size, err := quota.GetProjectUsage(path, projectID)
-	if err != nil {
-		return -1, err
-	}
-
-	return size, nil
-}
-
-func (s *storageDir) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
-	logger.Debugf("Creating DIR storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Create the path for the snapshot.
-	targetContainerName := snapshotContainer.Name()
-	targetContainerMntPoint := driver.GetSnapshotMountPoint(sourceContainer.Project(), s.pool.Name, targetContainerName)
-	err = os.MkdirAll(targetContainerMntPoint, 0711)
-	if err != nil {
-		return err
-	}
-
-	rsync := func(snapshotContainer instance.Instance, oldPath string, newPath string, bwlimit string) error {
-		output, err := rsync.LocalCopy(oldPath, newPath, bwlimit, true)
-		if err != nil {
-			s.ContainerDelete(snapshotContainer)
-			return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
-		}
-		return nil
-	}
-
-	ourStart, err := sourceContainer.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer sourceContainer.StorageStop()
-	}
-
-	if sourceContainer.Type() != instancetype.Container {
-		return fmt.Errorf("Source Instance type must be container")
-	}
-
-	srcCt := sourceContainer.(*containerLXC)
-
-	_, sourcePool, _ := srcCt.Storage().GetContainerPoolInfo()
-	sourceContainerName := sourceContainer.Name()
-	sourceContainerMntPoint := driver.GetContainerMountPoint(sourceContainer.Project(), sourcePool, sourceContainerName)
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	err = rsync(snapshotContainer, sourceContainerMntPoint, targetContainerMntPoint, bwlimit)
-	if err != nil {
-		return err
-	}
-
-	if sourceContainer.IsRunning() {
-		// This is done to ensure consistency when snapshotting. But we
-		// probably shouldn't fail just because of that.
-		logger.Debugf("Trying to freeze and rsync again to ensure consistency")
-
-		err := sourceContainer.Freeze()
-		if err != nil {
-			logger.Errorf("Trying to freeze and rsync again failed")
-			goto onSuccess
-		}
-		defer sourceContainer.Unfreeze()
-
-		err = rsync(snapshotContainer, sourceContainerMntPoint, targetContainerMntPoint, bwlimit)
-		if err != nil {
-			return err
-		}
-	}
-
-onSuccess:
-	// Check if the symlink
-	// ${LXD_DIR}/snapshots/<source_container_name> to ${POOL_PATH}/snapshots/<source_container_name>
-	// exists and if not create it.
-	sourceContainerSymlink := shared.VarPath("snapshots", project.Prefix(sourceContainer.Project(), sourceContainerName))
-	sourceContainerSymlinkTarget := driver.GetSnapshotMountPoint(sourceContainer.Project(), sourcePool, sourceContainerName)
-	if !shared.PathExists(sourceContainerSymlink) {
-		err = os.Symlink(sourceContainerSymlinkTarget, sourceContainerSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Created DIR storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
-	logger.Debugf("Creating empty DIR storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Create the path for the snapshot.
-	targetContainerName := snapshotContainer.Name()
-	targetContainerMntPoint := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, targetContainerName)
-	err = os.MkdirAll(targetContainerMntPoint, 0711)
-	if err != nil {
-		return err
-	}
-	revert := true
-	defer func() {
-		if !revert {
-			return
-		}
-		s.ContainerSnapshotDelete(snapshotContainer)
-	}()
-
-	// Check if the symlink
-	// ${LXD_DIR}/snapshots/<source_container_name> to ${POOL_PATH}/snapshots/<source_container_name>
-	// exists and if not create it.
-	targetContainerMntPoint = driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name,
-		targetContainerName)
-	sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(targetContainerName)
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools",
-		s.pool.Name, "containers-snapshots", project.Prefix(snapshotContainer.Project(), sourceName))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(snapshotContainer.Project(), sourceName))
-	err = driver.CreateSnapshotMountpoint(targetContainerMntPoint,
-		snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-	if err != nil {
-		return err
-	}
-
-	revert = false
-
-	logger.Debugf("Created empty DIR storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func dirSnapshotDeleteInternal(projectName, poolName string, snapshotName string) error {
-	snapshotContainerMntPoint := driver.GetSnapshotMountPoint(projectName, poolName, snapshotName)
-	if shared.PathExists(snapshotContainerMntPoint) {
-		err := os.RemoveAll(snapshotContainerMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	sourceContainerName, _, _ := shared.InstanceGetParentAndSnapshotName(snapshotName)
-	snapshotContainerPath := driver.GetSnapshotMountPoint(projectName, poolName, sourceContainerName)
-	empty, _ := shared.PathIsEmpty(snapshotContainerPath)
-	if empty == true {
-		err := os.Remove(snapshotContainerPath)
-		if err != nil {
-			return err
-		}
-
-		snapshotSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceContainerName))
-		if shared.PathExists(snapshotSymlink) {
-			err := os.Remove(snapshotSymlink)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (s *storageDir) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
-	logger.Debugf("Deleting DIR storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	snapshotContainerName := snapshotContainer.Name()
-	err = dirSnapshotDeleteInternal(snapshotContainer.Project(), s.pool.Name, snapshotContainerName)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Deleted DIR storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) ContainerSnapshotRename(snapshotContainer instance.Instance, newName string) error {
-	logger.Debugf("Renaming DIR storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Rename the mountpoint for the snapshot:
-	// ${POOL}/snapshots/<old_snapshot_name> to ${POOL}/snapshots/<new_snapshot_name>
-	oldSnapshotMntPoint := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, snapshotContainer.Name())
-	newSnapshotMntPoint := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, newName)
-	err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Renamed DIR storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-	return nil
-}
-
-func (s *storageDir) ContainerSnapshotStart(container instance.Instance) (bool, error) {
-	return s.StoragePoolMount()
-}
-
-func (s *storageDir) ContainerSnapshotStop(container instance.Instance) (bool, error) {
-	return true, nil
-}
-
-func (s *storageDir) ContainerBackupCreate(path string, backup backup.Backup, source instance.Instance) error {
-	// Prepare for rsync
-	rsync := func(oldPath string, newPath string, bwlimit string) error {
-		output, err := rsync.LocalCopy(oldPath, newPath, bwlimit, true)
-		if err != nil {
-			return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
-		}
-
-		return nil
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-
-	// Handle snapshots
-	if !backup.InstanceOnly() {
-		snapshotsPath := fmt.Sprintf("%s/snapshots", path)
-
-		// Retrieve the snapshots
-		snapshots, err := source.Snapshots()
-		if err != nil {
-			return err
-		}
-
-		// Create the snapshot path
-		if len(snapshots) > 0 {
-			err = os.MkdirAll(snapshotsPath, 0711)
-			if err != nil {
-				return err
-			}
-		}
-
-		for _, snap := range snapshots {
-			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-			snapshotMntPoint := driver.GetSnapshotMountPoint(snap.Project(), s.pool.Name, snap.Name())
-			target := fmt.Sprintf("%s/%s", snapshotsPath, snapName)
-
-			// Copy the snapshot
-			err = rsync(snapshotMntPoint, target, bwlimit)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	if source.IsRunning() {
-		// This is done to ensure consistency when snapshotting. But we
-		// probably shouldn't fail just because of that.
-		logger.Debugf("Freezing container '%s' for backup", source.Name())
-
-		err := source.Freeze()
-		if err != nil {
-			logger.Errorf("Failed to freeze container '%s' for backup: %v", source.Name(), err)
-		}
-		defer source.Unfreeze()
-	}
-
-	// Copy the container
-	containerPath := fmt.Sprintf("%s/container", path)
-
-	return rsync(source.Path(), containerPath, bwlimit)
-}
-
-func (s *storageDir) ContainerBackupLoad(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	// Create mountpoints
-	containerMntPoint := driver.GetContainerMountPoint(info.Project, s.pool.Name, info.Name)
-	err = driver.CreateContainerMountpoint(containerMntPoint, driver.InstancePath(instancetype.Container, info.Project, info.Name, false), info.Privileged)
-	if err != nil {
-		return errors.Wrap(err, "Create container mount point")
-	}
-
-	// Prepare tar arguments
-	args := append(tarArgs, []string{
-		"-",
-		"--strip-components=2",
-		"--xattrs-include=*",
-		"-C", containerMntPoint, "backup/container",
-	}...)
-
-	// Extract container
-	data.Seek(0, 0)
-	err = shared.RunCommandWithFds(data, nil, "tar", args...)
-	if err != nil {
-		return err
-	}
-
-	if len(info.Snapshots) > 0 {
-		// Create mountpoints
-		snapshotMntPoint := driver.GetSnapshotMountPoint(info.Project, s.pool.Name, info.Name)
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name,
-			"containers-snapshots", project.Prefix(info.Project, info.Name))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(info.Project, info.Name))
-		err := driver.CreateSnapshotMountpoint(snapshotMntPoint, snapshotMntPointSymlinkTarget,
-			snapshotMntPointSymlink)
-		if err != nil {
-			return err
-		}
-
-		// Prepare tar arguments
-		args := append(tarArgs, []string{
-			"-",
-			"--strip-components=2",
-			"--xattrs-include=*",
-			"-C", snapshotMntPoint, "backup/snapshots",
-		}...)
-
-		// Extract snapshots
-		data.Seek(0, 0)
-		err = shared.RunCommandWithFds(data, nil, "tar", args...)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (s *storageDir) ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	return nil
-}
-
-func (s *storageDir) ImageDelete(fingerprint string) error {
-	err := s.deleteImageDbPoolVolume(fingerprint)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageDir) ImageMount(fingerprint string) (bool, error) {
-	return true, nil
-}
-
-func (s *storageDir) ImageUmount(fingerprint string) (bool, error) {
-	return true, nil
-}
-
-func (s *storageDir) MigrationType() migration.MigrationFSType {
-	return migration.MigrationFSType_RSYNC
-}
-
-func (s *storageDir) PreservesInodes() bool {
-	return false
-}
-
-func (s *storageDir) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return rsyncMigrationSource(args)
-}
-
-func (s *storageDir) MigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	return rsyncMigrationSink(conn, op, args)
-}
-
-func (s *storageDir) StorageEntitySetQuota(volumeType int, size int64, data interface{}) error {
-	var path string
-	switch volumeType {
-	case storagePoolVolumeTypeContainer:
-		c := data.(instance.Instance)
-		path = driver.GetContainerMountPoint(c.Project(), s.pool.Name, c.Name())
-	case storagePoolVolumeTypeCustom:
-		path = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	}
-
-	ok, err := quota.Supported(path)
-	if err != nil || !ok {
-		logger.Warnf("Skipping setting disk quota for '%s' as the underlying filesystem doesn't support them", s.volume.Name)
-		return nil
-	}
-
-	projectID := uint32(s.volumeID + 10000)
-	err = quota.SetProjectQuota(path, projectID, size)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageDir) initQuota(path string, id int64) error {
-	if s.volumeID == 0 {
-		return fmt.Errorf("Missing volume ID")
-	}
-
-	ok, err := quota.Supported(path)
-	if err != nil || !ok {
-		return nil
-	}
-
-	projectID := uint32(s.volumeID + 10000)
-	err = quota.SetProject(path, projectID)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageDir) deleteQuota(path string, id int64) error {
-	if s.volumeID == 0 {
-		return fmt.Errorf("Missing volume ID")
-	}
-
-	ok, err := quota.Supported(path)
-	if err != nil || !ok {
-		return nil
-	}
-
-	err = quota.SetProject(path, 0)
-	if err != nil {
-		return err
-	}
-
-	projectID := uint32(s.volumeID + 10000)
-	err = quota.SetProjectQuota(path, projectID, 0)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageDir) StoragePoolResources() (*api.ResourcesStoragePool, error) {
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return nil, err
-	}
-
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-
-	return driver.GetStorageResource(poolMntPoint)
-}
-
-func (s *storageDir) StoragePoolVolumeCopy(source *api.StorageVolumeSource) error {
-	logger.Infof("Copying DIR storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-	successMsg := fmt.Sprintf("Copied DIR storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-
-	if s.pool.Name != source.Pool {
-		// setup storage for the source volume
-		srcStorage, err := storagePoolVolumeInit(s.s, "default", source.Pool, source.Name, storagePoolVolumeTypeCustom)
-		if err != nil {
-			logger.Errorf("Failed to initialize DIR storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-			return err
-		}
-
-		ourMount, err := srcStorage.StoragePoolMount()
-		if err != nil {
-			logger.Errorf("Failed to mount DIR storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-			return err
-		}
-		if ourMount {
-			defer srcStorage.StoragePoolUmount()
-		}
-	}
-
-	err := s.copyVolume(source.Pool, source.Name, s.volume.Name)
-	if err != nil {
-		return err
-	}
-
-	if source.VolumeOnly {
-		logger.Infof(successMsg)
-		return nil
-	}
-
-	snapshots, err := driver.VolumeSnapshotsGet(s.s, source.Pool, source.Name, storagePoolVolumeTypeCustom)
-	if err != nil {
-		return err
-	}
-
-	for _, snap := range snapshots {
-		_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
-		err = s.copyVolumeSnapshot(source.Pool, snap.Name, fmt.Sprintf("%s/%s", s.volume.Name, snapOnlyName))
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Infof(successMsg)
-	return nil
-}
-
-func (s *storageDir) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return rsyncStorageMigrationSource(args)
-}
-
-func (s *storageDir) StorageMigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	return rsyncStorageMigrationSink(conn, op, args)
-}
-
-func (s *storageDir) StoragePoolVolumeSnapshotCreate(target *api.StorageVolumeSnapshotsPost) error {
-	logger.Infof("Creating DIR storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	sourceName, _, ok := shared.InstanceGetParentAndSnapshotName(target.Name)
-	if !ok {
-		return fmt.Errorf("Not a snapshot name")
-	}
-
-	targetPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, target.Name)
-	err = os.MkdirAll(targetPath, 0711)
-	if err != nil {
-		return err
-	}
-
-	sourcePath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, sourceName)
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	msg, err := rsync.LocalCopy(sourcePath, targetPath, bwlimit, true)
-	if err != nil {
-		return fmt.Errorf("Failed to rsync: %s: %s", string(msg), err)
-	}
-
-	logger.Infof("Created DIR storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) StoragePoolVolumeSnapshotDelete() error {
-	logger.Infof("Deleting DIR storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	storageVolumePath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	err := os.RemoveAll(storageVolumePath)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	storageVolumeSnapshotPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, sourceName)
-	empty, err := shared.PathIsEmpty(storageVolumeSnapshotPath)
-	if err == nil && empty {
-		os.RemoveAll(storageVolumeSnapshotPath)
-	}
-
-	err = s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		s.volume.Name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for DIR storage volume "%s" on storage pool "%s"`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	logger.Infof("Deleted DIR storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageDir) StoragePoolVolumeSnapshotRename(newName string) error {
-	sourceName, _, ok := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	fullSnapshotName := fmt.Sprintf("%s%s%s", sourceName, shared.SnapshotDelimiter, newName)
-
-	logger.Infof("Renaming DIR storage volume on storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, fullSnapshotName)
-
-	if !ok {
-		return fmt.Errorf("Not a snapshot name")
-	}
-
-	oldPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	newPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, fullSnapshotName)
-
-	err := os.Rename(oldPath, newPath)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Renamed DIR storage volume on storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, fullSnapshotName)
-
-	return s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, fullSnapshotName, storagePoolVolumeTypeCustom, s.poolID)
-}
-
-func (s *storageDir) copyVolume(sourcePool string, source string, target string) error {
-	var srcMountPoint string
-
-	if shared.IsSnapshot(source) {
-		srcMountPoint = driver.GetStoragePoolVolumeSnapshotMountPoint(sourcePool, source)
-	} else {
-		srcMountPoint = driver.GetStoragePoolVolumeMountPoint(sourcePool, source)
-	}
-
-	dstMountPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, target)
-
-	err := os.MkdirAll(dstMountPoint, 0711)
-	if err != nil {
-		return err
-	}
-
-	err = s.initQuota(dstMountPoint, s.volumeID)
-	if err != nil {
-		return err
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-
-	_, err = rsync.LocalCopy(srcMountPoint, dstMountPoint, bwlimit, true)
-	if err != nil {
-		os.RemoveAll(dstMountPoint)
-		logger.Errorf("Failed to rsync into DIR storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageDir) copyVolumeSnapshot(sourcePool string, source string, target string) error {
-	srcMountPoint := driver.GetStoragePoolVolumeSnapshotMountPoint(sourcePool, source)
-	dstMountPoint := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, target)
-
-	err := os.MkdirAll(dstMountPoint, 0711)
-	if err != nil {
-		return err
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-
-	_, err = rsync.LocalCopy(srcMountPoint, dstMountPoint, bwlimit, true)
-	if err != nil {
-		os.RemoveAll(dstMountPoint)
-		logger.Errorf("Failed to rsync into DIR storage volume \"%s\" on storage pool \"%s\": %s", target, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
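
Among the code deleted above, StoragePoolMount and StoragePoolUmount serialised concurrent attempts on the same pool through a shared map of channels (lxdStorageOngoingOperationMap): the first caller registers a channel under a lock ID, later callers block on it and treat the closed channel as "someone else already did the work". A minimal, self-contained sketch of that pattern, with names simplified; this is an illustration of the idea, not the LXD implementation:

package main

import (
	"fmt"
	"sync"
)

var (
	opLock sync.Mutex
	ops    = map[string]chan bool{}
)

// doOnce runs fn for the given lock ID unless another goroutine is already
// running it; in that case it waits for the in-flight run and returns false.
func doOnce(id string, fn func()) bool {
	opLock.Lock()
	if ch, ok := ops[id]; ok {
		opLock.Unlock()
		<-ch // blocks until the other caller closes the channel
		return false
	}
	ch := make(chan bool)
	ops[id] = ch
	opLock.Unlock()

	defer func() {
		opLock.Lock()
		close(ch)
		delete(ops, id)
		opLock.Unlock()
	}()

	fn()
	return true
}

func main() {
	fmt.Println(doOnce("pool-mount:default", func() { /* mount would happen here */ }))
}
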
diff --git a/lxd/storage_migration.go b/lxd/storage_migration.go
index 8d46d47e1f..aa9fc2c190 100644
--- a/lxd/storage_migration.go
+++ b/lxd/storage_migration.go
@@ -316,136 +316,67 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operations.Operation, args Mig
 		return fmt.Errorf("Instance type must be container")
 	}
 
-	ct := args.Instance.(*containerLXC)
-
-	isDirBackend := ct.Storage().GetStorageType() == storageTypeDir
-	if isDirBackend {
-		if !args.InstanceOnly {
-			for _, snap := range args.Snapshots {
-				isSnapshotOutdated := true
-
-				for _, localSnap := range localSnapshots {
-					if localSnap.Name() == snap.GetName() {
-						if localSnap.CreationDate().Unix() > snap.GetCreationDate() {
-							isSnapshotOutdated = false
-							break
-						}
-					}
-				}
-
-				// Only copy snapshot if it's outdated
-				if !isSnapshotOutdated {
-					continue
-				}
-
-				snapArgs := snapshotProtobufToInstanceArgs(args.Instance.Project(), args.Instance.Name(), snap)
-
-				// Ensure that snapshot and parent container have the
-				// same storage pool in their local root disk device.
-				// If the root disk device for the snapshot comes from a
-				// profile on the new instance as well we don't need to
-				// do anything.
-				if snapArgs.Devices != nil {
-					snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(snapArgs.Devices.CloneNative())
-					if snapLocalRootDiskDeviceKey != "" {
-						snapArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
-					}
-				}
-
-				// Try to load the instance
-				s, err := instance.LoadByProjectAndName(args.Instance.DaemonState(),
-					args.Instance.Project(), snapArgs.Name)
-				if err != nil {
-					// Create the snapshot since it doesn't seem to exist
-					s, err = containerCreateEmptySnapshot(args.Instance.DaemonState(), snapArgs)
-					if err != nil {
-						return err
-					}
-				}
-
-				wrapper := migration.ProgressTracker(op, "fs_progress", s.Name())
-				if err := rsync.Recv(shared.AddSlash(s.Path()), &shared.WebsocketIO{Conn: conn}, wrapper, args.RsyncFeatures); err != nil {
-					return err
-				}
+	if !args.InstanceOnly {
+		for _, snap := range args.Snapshots {
+			isSnapshotOutdated := true
 
-				if args.Instance.Type() == instancetype.Container {
-					c := args.Instance.(*containerLXC)
-					err = resetContainerDiskIdmap(c, args.Idmap)
-					if err != nil {
-						return err
+			for _, localSnap := range localSnapshots {
+				if localSnap.Name() == snap.GetName() {
+					if localSnap.CreationDate().Unix() > snap.GetCreationDate() {
+						isSnapshotOutdated = false
+						break
 					}
 				}
 			}
-		}
 
-		wrapper := migration.ProgressTracker(op, "fs_progress", args.Instance.Name())
-		err = rsync.Recv(shared.AddSlash(args.Instance.Path()), &shared.WebsocketIO{Conn: conn}, wrapper, args.RsyncFeatures)
-		if err != nil {
-			return err
-		}
-	} else {
-		if !args.InstanceOnly {
-			for _, snap := range args.Snapshots {
-				isSnapshotOutdated := true
-
-				for _, localSnap := range localSnapshots {
-					if localSnap.Name() == snap.GetName() {
-						if localSnap.CreationDate().Unix() > snap.GetCreationDate() {
-							isSnapshotOutdated = false
-							break
-						}
-					}
-				}
+			// Only copy snapshot if it's outdated
+			if !isSnapshotOutdated {
+				continue
+			}
 
-				// Only copy snapshot if it's outdated
-				if !isSnapshotOutdated {
-					continue
+			snapArgs := snapshotProtobufToInstanceArgs(args.Instance.Project(), args.Instance.Name(), snap)
+
+			// Ensure that the snapshot and the parent container use
+			// the same storage pool in their local root disk device.
+			// If the snapshot's root disk device comes from a profile
+			// on the new instance as well, we don't need to do
+			// anything.
+			if snapArgs.Devices != nil {
+				snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(snapArgs.Devices.CloneNative())
+				if snapLocalRootDiskDeviceKey != "" {
+					snapArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
 				}
+			}
 
-				snapArgs := snapshotProtobufToInstanceArgs(args.Instance.Project(), args.Instance.Name(), snap)
-
-				// Ensure that snapshot and parent container have the
-				// same storage pool in their local root disk device.
-				// If the root disk device for the snapshot comes from a
-				// profile on the new instance as well we don't need to
-				// do anything.
-				if snapArgs.Devices != nil {
-					snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(snapArgs.Devices.CloneNative())
-					if snapLocalRootDiskDeviceKey != "" {
-						snapArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
-					}
-				}
+			wrapper := migration.ProgressTracker(op, "fs_progress", snap.GetName())
+			err := rsync.Recv(shared.AddSlash(args.Instance.Path()), &shared.WebsocketIO{Conn: conn}, wrapper, args.RsyncFeatures)
+			if err != nil {
+				return err
+			}
 
-				wrapper := migration.ProgressTracker(op, "fs_progress", snap.GetName())
-				err := rsync.Recv(shared.AddSlash(args.Instance.Path()), &shared.WebsocketIO{Conn: conn}, wrapper, args.RsyncFeatures)
+			if args.Instance.Type() == instancetype.Container {
+				c := args.Instance.(*containerLXC)
+				err = resetContainerDiskIdmap(c, args.Idmap)
 				if err != nil {
 					return err
 				}
+			}
 
-				if args.Instance.Type() == instancetype.Container {
-					c := args.Instance.(*containerLXC)
-					err = resetContainerDiskIdmap(c, args.Idmap)
-					if err != nil {
-						return err
-					}
-				}
-
-				_, err = instance.LoadByProjectAndName(args.Instance.DaemonState(),
-					args.Instance.Project(), snapArgs.Name)
+			_, err = instance.LoadByProjectAndName(args.Instance.DaemonState(),
+				args.Instance.Project(), snapArgs.Name)
+			if err != nil {
+				_, err = instanceCreateAsSnapshot(args.Instance.DaemonState(), snapArgs, args.Instance, op)
 				if err != nil {
-					_, err = instanceCreateAsSnapshot(args.Instance.DaemonState(), snapArgs, args.Instance, op)
-					if err != nil {
-						return err
-					}
+					return err
 				}
 			}
 		}
+	}
 
-		wrapper := migration.ProgressTracker(op, "fs_progress", args.Instance.Name())
-		err = rsync.Recv(shared.AddSlash(args.Instance.Path()), &shared.WebsocketIO{Conn: conn}, wrapper, args.RsyncFeatures)
-		if err != nil {
-			return err
-		}
+	wrapper := migration.ProgressTracker(op, "fs_progress", args.Instance.Name())
+	err = rsync.Recv(shared.AddSlash(args.Instance.Path()), &shared.WebsocketIO{Conn: conn}, wrapper, args.RsyncFeatures)
+	if err != nil {
+		return err
 	}
 
 	if args.Live {

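The snapshot filter in the sink above reduces to a timestamp rule: a snapshot offered by the source is skipped only when a local snapshot with the same name is already newer. A self-contained sketch of that rule (the Snapshot type and its fields stand in for the migration protobuf types):

package main

import (
	"fmt"
	"time"
)

// Snapshot stands in for a snapshot entry in the migration header.
type Snapshot struct {
	Name    string
	Created time.Time
}

// needsTransfer reports whether the remote snapshot must be copied.
// It is skipped only when a local snapshot with the same name exists
// and is strictly newer than the remote one.
func needsTransfer(remote Snapshot, local []Snapshot) bool {
	for _, l := range local {
		if l.Name == remote.Name && l.Created.Unix() > remote.Created.Unix() {
			return false
		}
	}

	return true
}

func main() {
	local := []Snapshot{{Name: "snap0", Created: time.Now()}}
	remote := Snapshot{Name: "snap0", Created: time.Now().Add(-time.Hour)}
	fmt.Println(needsTransfer(remote, local)) // false: the local copy is newer
}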
From 19134c978cc3668f0a7be12f778657ebaabc9d83 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Thu, 12 Dec 2019 09:36:15 -0500
Subject: [PATCH 02/36] lxd/storage: Remove legacy btrfs implementation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/api_internal.go            |    2 +-
 lxd/patches_utils.go           |  245 +++
 lxd/storage.go                 |   29 +-
 lxd/storage_btrfs.go           | 3087 --------------------------------
 lxd/storage_migration_btrfs.go |  195 --
 5 files changed, 247 insertions(+), 3311 deletions(-)
 delete mode 100644 lxd/storage_btrfs.go
 delete mode 100644 lxd/storage_migration_btrfs.go

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 6310fbf307..8e6cab8f35 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -712,7 +712,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		switch backup.Pool.Driver {
 		case "btrfs":
 			snpMntPt := storagePools.GetSnapshotMountPoint(projectName, backup.Pool.Name, snap.Name)
-			if !shared.PathExists(snpMntPt) || !isBtrfsSubVolume(snpMntPt) {
+			if !shared.PathExists(snpMntPt) || !btrfsIsSubVolume(snpMntPt) {
 				if req.Force {
 					continue
 				}
diff --git a/lxd/patches_utils.go b/lxd/patches_utils.go
index 01429d1a59..217d684973 100644
--- a/lxd/patches_utils.go
+++ b/lxd/patches_utils.go
@@ -1,13 +1,23 @@
 package main
 
 import (
+	"fmt"
 	"os"
+	"path"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"golang.org/x/sys/unix"
 
 	"github.com/lxc/lxd/lxd/project"
+	"github.com/lxc/lxd/lxd/state"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
 )
 
+// For 'dir' storage backend.
 func dirSnapshotDeleteInternal(projectName, poolName string, snapshotName string) error {
 	snapshotContainerMntPoint := driver.GetSnapshotMountPoint(projectName, poolName, snapshotName)
 	if shared.PathExists(snapshotContainerMntPoint) {
@@ -37,3 +47,238 @@ func dirSnapshotDeleteInternal(projectName, poolName string, snapshotName string
 
 	return nil
 }
+
+// For 'btrfs' storage backend.
+func btrfsSubVolumeCreate(subvol string) error {
+	parentDestPath := filepath.Dir(subvol)
+	if !shared.PathExists(parentDestPath) {
+		err := os.MkdirAll(parentDestPath, 0711)
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err := shared.RunCommand(
+		"btrfs",
+		"subvolume",
+		"create",
+		subvol)
+	if err != nil {
+		logger.Errorf("Failed to create BTRFS subvolume \"%s\": %v", subvol, err)
+		return err
+	}
+
+	return nil
+}
+
+func btrfsSnapshotDeleteInternal(projectName, poolName string, snapshotName string) error {
+	snapshotSubvolumeName := driver.GetSnapshotMountPoint(projectName, poolName, snapshotName)
+	// Also delete any leftover .ro snapshot.
+	roSnapshotSubvolumeName := fmt.Sprintf("%s.ro", snapshotSubvolumeName)
+	names := []string{snapshotSubvolumeName, roSnapshotSubvolumeName}
+	for _, name := range names {
+		if shared.PathExists(name) && btrfsIsSubVolume(name) {
+			err := btrfsSubVolumesDelete(name)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	sourceSnapshotMntPoint := shared.VarPath("snapshots", project.Prefix(projectName, snapshotName))
+	os.Remove(sourceSnapshotMntPoint)
+	os.Remove(snapshotSubvolumeName)
+
+	sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(snapshotName)
+	snapshotSubvolumePath := driver.GetSnapshotMountPoint(projectName, poolName, sourceName)
+	os.Remove(snapshotSubvolumePath)
+	if !shared.PathExists(snapshotSubvolumePath) {
+		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceName))
+		os.Remove(snapshotMntPointSymlink)
+	}
+
+	return nil
+}
+
+func btrfsSubVolumeQGroup(subvol string) (string, error) {
+	output, err := shared.RunCommand(
+		"btrfs",
+		"qgroup",
+		"show",
+		"-e",
+		"-f",
+		subvol)
+
+	if err != nil {
+		return "", fmt.Errorf("Quotas disabled on filesystem")
+	}
+
+	var qgroup string
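+	// The output parsed below looks roughly like this (illustrative;
+	// exact columns and spacing depend on the btrfs-progs version):
+	//
+	//   qgroupid         rfer         excl     max_excl
+	//   --------         ----         ----     --------
+	//   0/257           16384        16384         none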
+	for _, line := range strings.Split(output, "\n") {
+		if line == "" || strings.HasPrefix(line, "qgroupid") || strings.HasPrefix(line, "---") {
+			continue
+		}
+
+		fields := strings.Fields(line)
+		if len(fields) != 4 {
+			continue
+		}
+
+		qgroup = fields[0]
+	}
+
+	if qgroup == "" {
+		return "", fmt.Errorf("Unable to find quota group")
+	}
+
+	return qgroup, nil
+}
+
+func btrfsSubVolumeDelete(subvol string) error {
+	// Attempt to delete any qgroup on the subvolume, but don't fail if that's not possible.
+	qgroup, err := btrfsSubVolumeQGroup(subvol)
+	if err == nil {
+		shared.RunCommand(
+			"btrfs",
+			"qgroup",
+			"destroy",
+			qgroup,
+			subvol)
+	}
+
+	// Attempt to make the subvolume writable
+	shared.RunCommand("btrfs", "property", "set", subvol, "ro", "false")
+
+	// Delete the subvolume itself
+	_, err = shared.RunCommand(
+		"btrfs",
+		"subvolume",
+		"delete",
+		subvol)
+
+	return err
+}
+
+func btrfsSubVolumesDelete(subvol string) error {
+	// Delete subsubvols.
+	subsubvols, err := btrfsSubVolumesGet(subvol)
+	if err != nil {
+		return err
+	}
+	sort.Sort(sort.Reverse(sort.StringSlice(subsubvols)))
+
+	for _, subsubvol := range subsubvols {
+		err := btrfsSubVolumeDelete(path.Join(subvol, subsubvol))
+		if err != nil {
+			return err
+		}
+	}
+
+	// Delete the subvol itself
+	err = btrfsSubVolumeDelete(subvol)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func btrfsSnapshot(s *state.State, source string, dest string, readonly bool) error {
+	var output string
+	var err error
+	if readonly && !s.OS.RunningInUserNS {
+		output, err = shared.RunCommand(
+			"btrfs",
+			"subvolume",
+			"snapshot",
+			"-r",
+			source,
+			dest)
+	} else {
+		output, err = shared.RunCommand(
+			"btrfs",
+			"subvolume",
+			"snapshot",
+			source,
+			dest)
+	}
+	if err != nil {
+		return fmt.Errorf(
+			"subvolume snapshot failed, source=%s, dest=%s, output=%s",
+			source,
+			dest,
+			output,
+		)
+	}
+
+	return err
+}
+
+func btrfsIsSubVolume(subvolPath string) bool {
+	fs := unix.Stat_t{}
+	err := unix.Lstat(subvolPath, &fs)
+	if err != nil {
+		return false
+	}
+
+	// Subvolume roots always have inode number 256 (BTRFS_FIRST_FREE_OBJECTID).
+	if fs.Ino != 256 {
+		return false
+	}
+
+	return true
+}
+
+func btrfsSubVolumeIsRo(path string) bool {
+	output, err := shared.RunCommand("btrfs", "property", "get", "-ts", path)
+	if err != nil {
+		return false
+	}
+
+	return strings.HasPrefix(output, "ro=true")
+}
+
+func btrfsSubVolumeMakeRo(path string) error {
+	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "true")
+	return err
+}
+
+func btrfsSubVolumeMakeRw(path string) error {
+	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "false")
+	return err
+}
+
+func btrfsSubVolumesGet(path string) ([]string, error) {
+	result := []string{}
+
+	if !strings.HasSuffix(path, "/") {
+		path = path + "/"
+	}
+
+	// Unprivileged users can't get to fs internals
+	filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error {
+		// Skip walk errors
+		if err != nil {
+			return nil
+		}
+
+		// Ignore the base path
+		if strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/") {
+			return nil
+		}
+
+		// Subvolumes can only be directories
+		if !fi.IsDir() {
+			return nil
+		}
+
+		// Check if a btrfs subvolume
+		if btrfsIsSubVolume(fpath) {
+			result = append(result, strings.TrimPrefix(fpath, path))
+		}
+
+		return nil
+	})
+
+	return result, nil
+}
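btrfsIsSubVolume above hinges on an on-disk invariant: btrfs gives the root inode of every subvolume the fixed number 256 (BTRFS_FIRST_FREE_OBJECTID). The same check as a self-contained program (note that, like the helper above, it assumes the path is already known to live on btrfs; a stricter version would also verify the filesystem type):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// isSubvolumeRoot reports whether path is the root directory of a
// btrfs subvolume by checking for inode 256, the inode number btrfs
// assigns to every subvolume root (BTRFS_FIRST_FREE_OBJECTID).
func isSubvolumeRoot(path string) bool {
	var st unix.Stat_t
	err := unix.Lstat(path, &st)
	if err != nil {
		return false
	}

	return st.Ino == 256
}

func main() {
	for _, p := range os.Args[1:] {
		fmt.Printf("%s: subvolume=%v\n", p, isSubvolumeRoot(p))
	}
}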
diff --git a/lxd/storage.go b/lxd/storage.go
index d78220e4cd..b58034a345 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -58,10 +58,6 @@ func getPoolMountLockID(poolName string) string {
 	return fmt.Sprintf("mount/pool/%s", poolName)
 }
 
-func getPoolUmountLockID(poolName string) string {
-	return fmt.Sprintf("umount/pool/%s", poolName)
-}
-
 func getImageCreateLockID(poolName string, fingerprint string) string {
 	return fmt.Sprintf("create/image/%s/%s", poolName, fingerprint)
 }
@@ -100,8 +96,7 @@ func readStoragePoolDriversCache() map[string]string {
 type storageType int
 
 const (
-	storageTypeBtrfs storageType = iota
-	storageTypeCeph
+	storageTypeCeph storageType = iota
 	storageTypeLvm
 	storageTypeMock
 	storageTypeZfs
@@ -111,8 +106,6 @@ var supportedStoragePoolDrivers = []string{"btrfs", "ceph", "cephfs", "dir", "lv
 
 func storageTypeToString(sType storageType) (string, error) {
 	switch sType {
-	case storageTypeBtrfs:
-		return "btrfs", nil
 	case storageTypeCeph:
 		return "ceph", nil
 	case storageTypeLvm:
@@ -128,8 +121,6 @@ func storageTypeToString(sType storageType) (string, error) {
 
 func storageStringToType(sName string) (storageType, error) {
 	switch sName {
-	case "btrfs":
-		return storageTypeBtrfs, nil
 	case "ceph":
 		return storageTypeCeph, nil
 	case "lvm":
@@ -256,13 +247,6 @@ func storageCoreInit(driver string) (storage, error) {
 	}
 
 	switch sType {
-	case storageTypeBtrfs:
-		btrfs := storageBtrfs{}
-		err = btrfs.StorageCoreInit()
-		if err != nil {
-			return nil, err
-		}
-		return &btrfs, nil
 	case storageTypeCeph:
 		ceph := storageCeph{}
 		err = ceph.StorageCoreInit()
@@ -325,17 +309,6 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 	}
 
 	switch sType {
-	case storageTypeBtrfs:
-		btrfs := storageBtrfs{}
-		btrfs.poolID = poolID
-		btrfs.pool = pool
-		btrfs.volume = volume
-		btrfs.s = s
-		err = btrfs.StoragePoolInit()
-		if err != nil {
-			return nil, err
-		}
-		return &btrfs, nil
 	case storageTypeCeph:
 		ceph := storageCeph{}
 		ceph.poolID = poolID
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
deleted file mode 100644
index 86f9c09b3d..0000000000
--- a/lxd/storage_btrfs.go
+++ /dev/null
@@ -1,3087 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path"
-	"path/filepath"
-	"sort"
-	"strconv"
-	"strings"
-
-	"github.com/gorilla/websocket"
-	"github.com/pkg/errors"
-	"golang.org/x/sys/unix"
-
-	"github.com/lxc/lxd/lxd/backup"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/instance/instancetype"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/lxd/rsync"
-	"github.com/lxc/lxd/lxd/state"
-	driver "github.com/lxc/lxd/lxd/storage"
-	"github.com/lxc/lxd/lxd/storage/drivers"
-	"github.com/lxc/lxd/lxd/util"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/ioprogress"
-	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/units"
-)
-
-type storageBtrfs struct {
-	remount uintptr
-	storageShared
-}
-
-var btrfsVersion = ""
-
-func (s *storageBtrfs) getBtrfsMountOptions() string {
-	if s.pool.Config["btrfs.mount_options"] != "" {
-		return s.pool.Config["btrfs.mount_options"]
-	}
-
-	return "user_subvol_rm_allowed"
-}
-
-func (s *storageBtrfs) setBtrfsMountOptions(mountOptions string) {
-	s.pool.Config["btrfs.mount_options"] = mountOptions
-}
-
-// ${LXD_DIR}/storage-pools/<pool>/containers
-func (s *storageBtrfs) getContainerSubvolumePath(poolName string) string {
-	return shared.VarPath("storage-pools", poolName, "containers")
-}
-
-// ${LXD_DIR}/storage-pools/<pool>/containers-snapshots
-func getSnapshotSubvolumePath(projectName, poolName string, containerName string) string {
-	return shared.VarPath("storage-pools", poolName, "containers-snapshots", project.Prefix(projectName, containerName))
-}
-
-// ${LXD_DIR}/storage-pools/<pool>/images
-func (s *storageBtrfs) getImageSubvolumePath(poolName string) string {
-	return shared.VarPath("storage-pools", poolName, "images")
-}
-
-// ${LXD_DIR}/storage-pools/<pool>/custom
-func (s *storageBtrfs) getCustomSubvolumePath(poolName string) string {
-	return shared.VarPath("storage-pools", poolName, "custom")
-}
-
-// ${LXD_DIR}/storage-pools/<pool>/custom-snapshots
-func (s *storageBtrfs) getCustomSnapshotSubvolumePath(poolName string) string {
-	return shared.VarPath("storage-pools", poolName, "custom-snapshots")
-}
-
-func (s *storageBtrfs) StorageCoreInit() error {
-	s.sType = storageTypeBtrfs
-	typeName, err := storageTypeToString(s.sType)
-	if err != nil {
-		return err
-	}
-	s.sTypeName = typeName
-
-	if btrfsVersion != "" {
-		s.sTypeVersion = btrfsVersion
-		return nil
-	}
-
-	out, err := exec.LookPath("btrfs")
-	if err != nil || len(out) == 0 {
-		return fmt.Errorf("The 'btrfs' tool isn't available")
-	}
-
-	output, err := shared.RunCommand("btrfs", "version")
-	if err != nil {
-		return fmt.Errorf("The 'btrfs' tool isn't working properly")
-	}
-
-	count, err := fmt.Sscanf(strings.SplitN(output, " ", 2)[1], "v%s\n", &s.sTypeVersion)
-	if err != nil || count != 1 {
-		return fmt.Errorf("The 'btrfs' tool isn't working properly")
-	}
-
-	btrfsVersion = s.sTypeVersion
-
-	return nil
-}
-
-func (s *storageBtrfs) StoragePoolInit() error {
-	err := s.StorageCoreInit()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) StoragePoolCheck() error {
-	// FIXME(brauner): Think of something smart or useful (And then think
-	// again if it is worth implementing it. :)).
-	logger.Debugf("Checking BTRFS storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) StoragePoolCreate() error {
-	logger.Infof("Creating BTRFS storage pool \"%s\"", s.pool.Name)
-	s.pool.Config["volatile.initial_source"] = s.pool.Config["source"]
-
-	isBlockDev := false
-
-	source := s.pool.Config["source"]
-	if strings.HasPrefix(source, "/") {
-		source = shared.HostPath(s.pool.Config["source"])
-	}
-
-	defaultSource := filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", s.pool.Name))
-	if source == "" || source == defaultSource {
-		source = defaultSource
-		s.pool.Config["source"] = source
-
-		f, err := os.Create(source)
-		if err != nil {
-			return fmt.Errorf("Failed to open %s: %s", source, err)
-		}
-		defer f.Close()
-
-		err = f.Chmod(0600)
-		if err != nil {
-			return fmt.Errorf("Failed to chmod %s: %s", source, err)
-		}
-
-		size, err := units.ParseByteSizeString(s.pool.Config["size"])
-		if err != nil {
-			return err
-		}
-		err = f.Truncate(size)
-		if err != nil {
-			return fmt.Errorf("Failed to create sparse file %s: %s", source, err)
-		}
-
-		output, err := makeFSType(source, "btrfs", &mkfsOptions{Label: s.pool.Name})
-		if err != nil {
-			return fmt.Errorf("Failed to create the BTRFS pool: %v (%s)", err, output)
-		}
-	} else {
-		// Unset size property since it doesn't make sense.
-		s.pool.Config["size"] = ""
-
-		if filepath.IsAbs(source) {
-			isBlockDev = shared.IsBlockdevPath(source)
-			if isBlockDev {
-				output, err := makeFSType(source, "btrfs", &mkfsOptions{Label: s.pool.Name})
-				if err != nil {
-					return fmt.Errorf("Failed to create the BTRFS pool: %v (%s)", err, output)
-				}
-			} else {
-				if isBtrfsSubVolume(source) {
-					subvols, err := btrfsSubVolumesGet(source)
-					if err != nil {
-						return fmt.Errorf("Could not determine if existing BTRFS subvolume ist empty: %s", err)
-					}
-					if len(subvols) > 0 {
-						return fmt.Errorf("Requested BTRFS subvolume exists but is not empty")
-					}
-				} else {
-					cleanSource := filepath.Clean(source)
-					lxdDir := shared.VarPath()
-					poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-					if shared.PathExists(source) && !isOnBtrfs(source) {
-						return fmt.Errorf("Existing path is neither a BTRFS subvolume nor does it reside on a BTRFS filesystem")
-					} else if strings.HasPrefix(cleanSource, lxdDir) {
-						if cleanSource != poolMntPoint {
-							return fmt.Errorf("BTRFS subvolumes requests in LXD directory \"%s\" are only valid under \"%s\"\n(e.g. source=%s)", shared.VarPath(), shared.VarPath("storage-pools"), poolMntPoint)
-						} else if s.s.OS.BackingFS != "btrfs" {
-							return fmt.Errorf("Creation of BTRFS subvolume requested but \"%s\" does not reside on BTRFS filesystem", source)
-						}
-					}
-
-					err := btrfsSubVolumeCreate(source)
-					if err != nil {
-						return err
-					}
-				}
-			}
-		} else {
-			return fmt.Errorf("Invalid \"source\" property")
-		}
-	}
-
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-	if !shared.PathExists(poolMntPoint) {
-		err := os.MkdirAll(poolMntPoint, driver.StoragePoolsDirMode)
-		if err != nil {
-			return err
-		}
-	}
-
-	var err1 error
-	var devUUID string
-	mountFlags, mountOptions := resolveMountOptions(s.getBtrfsMountOptions())
-	mountFlags |= s.remount
-	if isBlockDev && filepath.IsAbs(source) {
-		devUUID, _ = shared.LookupUUIDByBlockDevPath(source)
-		// The symlink might not have been created even with the delay
-		// we granted it above. So try to call btrfs filesystem show and
-		// parse it out. (I __hate__ this!)
-		if devUUID == "" {
-			logger.Warnf("Failed to detect UUID by looking at /dev/disk/by-uuid")
-			devUUID, err1 = s.btrfsLookupFsUUID(source)
-			if err1 != nil {
-				logger.Errorf("Failed to detect UUID by parsing filesystem info")
-				return err1
-			}
-		}
-		s.pool.Config["source"] = devUUID
-
-		// If the symlink in /dev/disk/by-uuid hasn't been created yet
-		// aka we only detected it by parsing btrfs filesystem show, we
-		// cannot call StoragePoolMount() since it will try to do the
-		// reverse operation. So instead we shamelessly mount using the
-		// block device path at the time of pool creation.
-		err1 = unix.Mount(source, poolMntPoint, "btrfs", mountFlags, mountOptions)
-	} else {
-		_, err1 = s.StoragePoolMount()
-	}
-	if err1 != nil {
-		return err1
-	}
-
-	// Create default subvolumes.
-	dummyDir := driver.GetContainerMountPoint("default", s.pool.Name, "")
-	err := btrfsSubVolumeCreate(dummyDir)
-	if err != nil {
-		return fmt.Errorf("Could not create btrfs subvolume %s: %v", dummyDir, err)
-	}
-
-	dummyDir = driver.GetSnapshotMountPoint("default", s.pool.Name, "")
-	err = btrfsSubVolumeCreate(dummyDir)
-	if err != nil {
-		return fmt.Errorf("Could not create btrfs subvolume %s: %v", dummyDir, err)
-	}
-
-	dummyDir = driver.GetImageMountPoint(s.pool.Name, "")
-	err = btrfsSubVolumeCreate(dummyDir)
-	if err != nil {
-		return fmt.Errorf("Could not create btrfs subvolume %s: %v", dummyDir, err)
-	}
-
-	dummyDir = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, "")
-	err = btrfsSubVolumeCreate(dummyDir)
-	if err != nil {
-		return fmt.Errorf("Could not create btrfs subvolume %s: %v", dummyDir, err)
-	}
-
-	dummyDir = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, "")
-	err = btrfsSubVolumeCreate(dummyDir)
-	if err != nil {
-		return fmt.Errorf("Could not create btrfs subvolume %s: %v", dummyDir, err)
-	}
-
-	err = s.StoragePoolCheck()
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Created BTRFS storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) StoragePoolDelete() error {
-	logger.Infof("Deleting BTRFS storage pool \"%s\"", s.pool.Name)
-
-	source := s.pool.Config["source"]
-	if strings.HasPrefix(source, "/") {
-		source = shared.HostPath(s.pool.Config["source"])
-	}
-
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	// Delete default subvolumes.
-	dummyDir := driver.GetContainerMountPoint("default", s.pool.Name, "")
-	btrfsSubVolumesDelete(dummyDir)
-
-	dummyDir = driver.GetSnapshotMountPoint("default", s.pool.Name, "")
-	btrfsSubVolumesDelete(dummyDir)
-
-	dummyDir = driver.GetImageMountPoint(s.pool.Name, "")
-	btrfsSubVolumesDelete(dummyDir)
-
-	dummyDir = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, "")
-	btrfsSubVolumesDelete(dummyDir)
-
-	_, err := s.StoragePoolUmount()
-	if err != nil {
-		return err
-	}
-
-	// This is a UUID. Check whether we can find the block device.
-	if !filepath.IsAbs(source) {
-		// Try to lookup the disk device by UUID but don't fail. If we
-		// don't find one this might just mean we have been given the
-		// UUID of a subvolume.
-		byUUID := fmt.Sprintf("/dev/disk/by-uuid/%s", source)
-		diskPath, err := os.Readlink(byUUID)
-		msg := ""
-		if err == nil {
-			msg = fmt.Sprintf("Removing disk device %s with UUID: %s.", diskPath, source)
-		} else {
-			msg = fmt.Sprintf("Failed to lookup disk device with UUID: %s: %s.", source, err)
-		}
-		logger.Debugf(msg)
-	} else {
-		var err error
-		cleanSource := filepath.Clean(source)
-		sourcePath := shared.VarPath("disks", s.pool.Name)
-		loopFilePath := sourcePath + ".img"
-		if cleanSource == loopFilePath {
-			// This is a loop file so simply remove it.
-			err = os.Remove(source)
-		} else {
-			if !isBtrfsFilesystem(source) && isBtrfsSubVolume(source) {
-				err = btrfsSubVolumesDelete(source)
-			}
-		}
-		if err != nil && !os.IsNotExist(err) {
-			return err
-		}
-	}
-
-	// Remove the mountpoint for the storage pool.
-	err = os.RemoveAll(driver.GetStoragePoolMountPoint(s.pool.Name))
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	logger.Infof("Deleted BTRFS storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) StoragePoolMount() (bool, error) {
-	logger.Debugf("Mounting BTRFS storage pool \"%s\"", s.pool.Name)
-
-	source := s.pool.Config["source"]
-	if strings.HasPrefix(source, "/") {
-		source = shared.HostPath(s.pool.Config["source"])
-	}
-
-	if source == "" {
-		return false, fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-
-	poolMountLockID := getPoolMountLockID(s.pool.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in mounting the storage pool.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[poolMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	removeLockFromMap := func() {
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, poolMountLockID)
-		}
-		lxdStorageMapLock.Unlock()
-	}
-	defer removeLockFromMap()
-
-	// Check whether the mountpoint poolMntPoint exists.
-	if !shared.PathExists(poolMntPoint) {
-		err := os.MkdirAll(poolMntPoint, driver.StoragePoolsDirMode)
-		if err != nil {
-			return false, err
-		}
-	}
-
-	if shared.IsMountPoint(poolMntPoint) && (s.remount&unix.MS_REMOUNT) == 0 {
-		return false, nil
-	}
-
-	mountFlags, mountOptions := resolveMountOptions(s.getBtrfsMountOptions())
-	mountSource := source
-	isBlockDev := shared.IsBlockdevPath(source)
-	if filepath.IsAbs(source) {
-		cleanSource := filepath.Clean(source)
-		poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-		loopFilePath := shared.VarPath("disks", s.pool.Name+".img")
-		if !isBlockDev && cleanSource == loopFilePath {
-			// If source == "${LXD_DIR}"/disks/{pool_name} it is a
-			// loop file we're dealing with.
-			//
-			// Since we mount the loop device, LO_FLAGS_AUTOCLEAR is
-			// fine: the loop device will be kept around for as long
-			// as the mount exists.
-			loopF, loopErr := drivers.PrepareLoopDev(source, drivers.LoFlagsAutoclear)
-			if loopErr != nil {
-				return false, loopErr
-			}
-			mountSource = loopF.Name()
-			defer loopF.Close()
-		} else if !isBlockDev && cleanSource != poolMntPoint {
-			mountSource = source
-			mountFlags |= unix.MS_BIND
-		} else if !isBlockDev && cleanSource == poolMntPoint && s.s.OS.BackingFS == "btrfs" {
-			return false, nil
-		}
-		// User is using block device path.
-	} else {
-		// Try to lookup the disk device by UUID but don't fail. If we
-		// don't find one this might just mean we have been given the
-		// UUID of a subvolume.
-		byUUID := fmt.Sprintf("/dev/disk/by-uuid/%s", source)
-		diskPath, err := os.Readlink(byUUID)
-		if err == nil {
-			mountSource = fmt.Sprintf("/dev/%s", strings.Trim(diskPath, "../../"))
-		} else {
-			// We have very likely been given a subvolume UUID. In
-			// this case we should simply assume that the user has
-			// mounted the parent of the subvolume or the subvolume
-			// itself. Otherwise this becomes a really messy
-			// detection task.
-			return false, nil
-		}
-	}
-
-	mountFlags |= s.remount
-	err := unix.Mount(mountSource, poolMntPoint, "btrfs", mountFlags, mountOptions)
-	if err != nil {
-		logger.Errorf("Failed to mount BTRFS storage pool \"%s\" onto \"%s\" with mountoptions \"%s\": %s", mountSource, poolMntPoint, mountOptions, err)
-		return false, err
-	}
-
-	logger.Debugf("Mounted BTRFS storage pool \"%s\"", s.pool.Name)
-	return true, nil
-}
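The channel-in-a-map dance above (repeated in StoragePoolUmount below) is a single-flight guard: the first caller registers a wait channel under a per-pool key, concurrent callers block on that channel and trust the winner's outcome. Stripped of the storage specifics, the pattern looks like this (all names here are illustrative):

package main

import (
	"fmt"
	"sync"
)

var (
	opLock sync.Mutex
	opMap  = map[string]chan struct{}{}
)

// once runs fn for key unless another goroutine is already running it,
// in which case it waits for that run to finish and returns nil,
// trusting the first caller's outcome (as the pool mount code does).
func once(key string, fn func() error) error {
	opLock.Lock()
	ch, ok := opMap[key]
	if ok {
		opLock.Unlock()
		<-ch // wait for the in-flight operation
		return nil
	}

	ch = make(chan struct{})
	opMap[key] = ch
	opLock.Unlock()

	defer func() {
		opLock.Lock()
		close(ch)
		delete(opMap, key)
		opLock.Unlock()
	}()

	return fn()
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			once("mount/pool/default", func() error {
				fmt.Println("mounting")
				return nil
			})
		}()
	}
	wg.Wait()
}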
-
-func (s *storageBtrfs) StoragePoolUmount() (bool, error) {
-	logger.Debugf("Unmounting BTRFS storage pool \"%s\"", s.pool.Name)
-
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-
-	poolUmountLockID := getPoolUmountLockID(s.pool.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[poolUmountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in unmounting the storage pool.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[poolUmountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	removeLockFromMap := func() {
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[poolUmountLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, poolUmountLockID)
-		}
-		lxdStorageMapLock.Unlock()
-	}
-
-	defer removeLockFromMap()
-
-	if shared.IsMountPoint(poolMntPoint) {
-		err := unix.Unmount(poolMntPoint, 0)
-		if err != nil {
-			return false, err
-		}
-	}
-
-	logger.Debugf("Unmounted BTRFS storage pool \"%s\"", s.pool.Name)
-	return true, nil
-}
-
-func (s *storageBtrfs) StoragePoolUpdate(writable *api.StoragePoolPut,
-	changedConfig []string) error {
-	logger.Infof(`Updating BTRFS storage pool "%s"`, s.pool.Name)
-
-	changeable := changeableStoragePoolProperties["btrfs"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolError(unchangeable, "btrfs")
-	}
-
-	// "rsync.bwlimit" requires no on-disk modifications.
-
-	if shared.StringInSlice("btrfs.mount_options", changedConfig) {
-		s.setBtrfsMountOptions(writable.Config["btrfs.mount_options"])
-		s.remount |= unix.MS_REMOUNT
-		_, err := s.StoragePoolMount()
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Infof(`Updated BTRFS storage pool "%s"`, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) GetContainerPoolInfo() (int64, string, string) {
-	return s.poolID, s.pool.Name, s.pool.Name
-}
-
-// Functions dealing with storage volumes.
-func (s *storageBtrfs) StoragePoolVolumeCreate() error {
-	logger.Infof("Creating BTRFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	isSnapshot := shared.IsSnapshot(s.volume.Name)
-
-	// Create subvolume path on the storage pool.
-	var customSubvolumePath string
-
-	if isSnapshot {
-		customSubvolumePath = s.getCustomSnapshotSubvolumePath(s.pool.Name)
-	} else {
-		customSubvolumePath = s.getCustomSubvolumePath(s.pool.Name)
-	}
-
-	if !shared.PathExists(customSubvolumePath) {
-		err := os.MkdirAll(customSubvolumePath, 0700)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Create subvolume.
-	var customSubvolumeName string
-
-	if isSnapshot {
-		customSubvolumeName = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	} else {
-		customSubvolumeName = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	}
-
-	err = btrfsSubVolumeCreate(customSubvolumeName)
-	if err != nil {
-		return err
-	}
-
-	// apply quota
-	if s.volume.Config["size"] != "" {
-		size, err := units.ParseByteSizeString(s.volume.Config["size"])
-		if err != nil {
-			return err
-		}
-
-		err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Infof("Created BTRFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) StoragePoolVolumeDelete() error {
-	logger.Infof("Deleting BTRFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Delete subvolume.
-	customSubvolumeName := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	if shared.PathExists(customSubvolumeName) && isBtrfsSubVolume(customSubvolumeName) {
-		err = btrfsSubVolumesDelete(customSubvolumeName)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Delete the mountpoint.
-	if shared.PathExists(customSubvolumeName) {
-		err = os.Remove(customSubvolumeName)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		s.volume.Name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for BTRFS storage volume "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-	}
-
-	logger.Infof("Deleted BTRFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) StoragePoolVolumeMount() (bool, error) {
-	logger.Debugf("Mounting BTRFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	// The storage pool must be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Mounted BTRFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageBtrfs) StoragePoolVolumeUmount() (bool, error) {
-	return true, nil
-}
-
-func (s *storageBtrfs) StoragePoolVolumeUpdate(writable *api.StorageVolumePut, changedConfig []string) error {
-	if writable.Restore != "" {
-		logger.Debugf(`Restoring BTRFS storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-
-		// The storage pool must be mounted.
-		_, err := s.StoragePoolMount()
-		if err != nil {
-			return err
-		}
-
-		// Create a backup so we can revert.
-		targetVolumeSubvolumeName := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-		backupTargetVolumeSubvolumeName := fmt.Sprintf("%s.tmp", targetVolumeSubvolumeName)
-		err = os.Rename(targetVolumeSubvolumeName, backupTargetVolumeSubvolumeName)
-		if err != nil {
-			return err
-		}
-		undo := true
-		defer func() {
-			if undo {
-				os.Rename(backupTargetVolumeSubvolumeName, targetVolumeSubvolumeName)
-			}
-		}()
-
-		sourceVolumeSubvolumeName := driver.GetStoragePoolVolumeSnapshotMountPoint(
-			s.pool.Name, fmt.Sprintf("%s/%s", s.volume.Name, writable.Restore))
-		err = s.btrfsPoolVolumesSnapshot(sourceVolumeSubvolumeName,
-			targetVolumeSubvolumeName, false, true)
-		if err != nil {
-			return err
-		}
-
-		undo = false
-		err = btrfsSubVolumesDelete(backupTargetVolumeSubvolumeName)
-		if err != nil {
-			return err
-		}
-
-		logger.Debugf(`Restored BTRFS storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-		return nil
-	}
-
-	logger.Infof(`Updating BTRFS storage volume "%s"`, s.volume.Name)
-
-	changeable := changeableStoragePoolVolumeProperties["btrfs"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolVolumeError(unchangeable, "btrfs")
-	}
-
-	if shared.StringInSlice("size", changedConfig) {
-		if s.volume.Type != storagePoolVolumeTypeNameCustom {
-			return updateStoragePoolVolumeError([]string{"size"}, "btrfs")
-		}
-
-		if s.volume.Config["size"] != writable.Config["size"] {
-			size, err := units.ParseByteSizeString(writable.Config["size"])
-			if err != nil {
-				return err
-			}
-
-			err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	logger.Infof(`Updated BTRFS storage volume "%s"`, s.volume.Name)
-	return nil
-}
-
-func (s *storageBtrfs) StoragePoolVolumeRename(newName string) error {
-	logger.Infof(`Renaming BTRFS storage volume on storage pool "%s" from "%s" to "%s"`,
-		s.pool.Name, s.volume.Name, newName)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	usedBy, err := storagePoolVolumeUsedByInstancesGet(s.s, "default", s.pool.Name, s.volume.Name)
-	if err != nil {
-		return err
-	}
-	if len(usedBy) > 0 {
-		return fmt.Errorf(`BTRFS storage volume "%s" on storage pool "%s" is attached to containers`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	oldPath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	newPath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, newName)
-	err = os.Rename(oldPath, newPath)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof(`Renamed BTRFS storage volume on storage pool "%s" from "%s" to "%s"`,
-		s.pool.Name, s.volume.Name, newName)
-
-	err = s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, newName,
-		storagePoolVolumeTypeCustom, s.poolID)
-	if err != nil {
-		return err
-	}
-
-	// Get volumes attached to source storage volume
-	volumes, err := s.s.Cluster.StoragePoolVolumeSnapshotsGetType(s.volume.Name,
-		storagePoolVolumeTypeCustom, s.poolID)
-	if err != nil {
-		return err
-	}
-
-	for _, vol := range volumes {
-		_, snapshotName, _ := shared.InstanceGetParentAndSnapshotName(vol.Name)
-		oldVolumeName := fmt.Sprintf("%s%s%s", s.volume.Name, shared.SnapshotDelimiter, snapshotName)
-		newVolumeName := fmt.Sprintf("%s%s%s", newName, shared.SnapshotDelimiter, snapshotName)
-
-		// Rename volume snapshots
-		oldPath = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, oldVolumeName)
-		newPath = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, newVolumeName)
-		err = os.Rename(oldPath, newPath)
-		if err != nil {
-			return err
-		}
-
-		err = s.s.Cluster.StoragePoolVolumeRename("default", oldVolumeName, newVolumeName,
-			storagePoolVolumeTypeCustom, s.poolID)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Functions dealing with container storage.
-func (s *storageBtrfs) ContainerStorageReady(container instance.Instance) bool {
-	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	return isBtrfsSubVolume(containerMntPoint)
-}
-
-func (s *storageBtrfs) doContainerCreate(projectName, name string, privileged bool) error {
-	logger.Debugf("Creating empty BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// We can only create the btrfs subvolume under the mounted storage
-	// pool. The on-disk layout for containers on a btrfs storage pool will
-	// thus be
-	// ${LXD_DIR}/storage-pools/<pool>/containers/. The btrfs tool will
-	// complain if the intermediate path does not exist, so create it if it
-	// doesn't already.
-	containerSubvolumePath := s.getContainerSubvolumePath(s.pool.Name)
-	if !shared.PathExists(containerSubvolumePath) {
-		err := os.MkdirAll(containerSubvolumePath, driver.ContainersDirMode)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Create empty subvolume for container.
-	containerSubvolumeName := driver.GetContainerMountPoint(projectName, s.pool.Name, name)
-	err = btrfsSubVolumeCreate(containerSubvolumeName)
-	if err != nil {
-		return err
-	}
-
-	// Create the mountpoint for the container at:
-	// ${LXD_DIR}/containers/<name>
-	err = driver.CreateContainerMountpoint(containerSubvolumeName, shared.VarPath("containers", project.Prefix(projectName, name)), privileged)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Created empty BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) ContainerCreate(container instance.Instance) error {
-	err := s.doContainerCreate(container.Project(), container.Name(), container.IsPrivileged())
-	if err != nil {
-		return err
-	}
-
-	return container.DeferTemplateApply("create")
-}
-
-// And this function is why I started hating on btrfs...
-func (s *storageBtrfs) ContainerCreateFromImage(container instance.Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	logger.Debugf("Creating BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return errors.Wrap(err, "Failed to mount storage pool")
-	}
-
-	// We can only create the btrfs subvolume under the mounted storage
-	// pool. The on-disk layout for containers on a btrfs storage pool will
-	// thus be
-	// ${LXD_DIR}/storage-pools/<pool>/containers/. The btrfs tool will
-	// complain if the intermediate path does not exist, so create it if it
-	// doesn't already.
-	containerSubvolumePath := s.getContainerSubvolumePath(s.pool.Name)
-	if !shared.PathExists(containerSubvolumePath) {
-		err := os.MkdirAll(containerSubvolumePath, driver.ContainersDirMode)
-		if err != nil {
-			return errors.Wrap(err, "Failed to create volume directory")
-		}
-	}
-
-	// Mountpoint of the image:
-	// ${LXD_DIR}/images/<fingerprint>
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	imageStoragePoolLockID := getImageCreateLockID(s.pool.Name, fingerprint)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[imageStoragePoolLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-	} else {
-		lxdStorageOngoingOperationMap[imageStoragePoolLockID] = make(chan bool)
-		lxdStorageMapLock.Unlock()
-
-		var imgerr error
-		if !shared.PathExists(imageMntPoint) || !isBtrfsSubVolume(imageMntPoint) {
-			imgerr = s.ImageCreate(fingerprint, tracker)
-		}
-
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[imageStoragePoolLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, imageStoragePoolLockID)
-		}
-		lxdStorageMapLock.Unlock()
-
-		if imgerr != nil {
-			return errors.Wrap(imgerr, "Failed to create image volume")
-		}
-	}
-
-	// Create a rw snapshot at
-	// ${LXD_DIR}/storage-pools/<pool>/containers/<name>
-	// from the mounted ro image snapshot mounted at
-	// ${LXD_DIR}/storage-pools/<pool>/images/<fingerprint>
-	containerSubvolumeName := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	err = s.btrfsPoolVolumesSnapshot(imageMntPoint, containerSubvolumeName, false, false)
-	if err != nil {
-		return errors.Wrap(err, "Failed to storage pool volume snapshot")
-	}
-
-	// Create the mountpoint for the container at:
-	// ${LXD_DIR}/containers/<name>
-	err = driver.CreateContainerMountpoint(containerSubvolumeName, container.Path(), container.IsPrivileged())
-	if err != nil {
-		return errors.Wrap(err, "Failed to create container mountpoint")
-	}
-
-	logger.Debugf("Created BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	err = container.DeferTemplateApply("create")
-	if err != nil {
-		return errors.Wrap(err, "Failed to apply container template")
-	}
-	return nil
-}
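The comment block above is the heart of btrfs-backed container creation: the unpacked image is kept as a read-only subvolume, and every container starts life as a writable snapshot of it, so nothing is copied up front and the two trees share extents until the container diverges. Just that step in isolation, shelling out to the btrfs tool (the helper is a sketch, not LXD API):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// containerFromImage creates a writable container rootfs as a
// copy-on-write snapshot of a read-only image subvolume.
func containerFromImage(imageSubvol, containerSubvol string) error {
	out, err := exec.Command("btrfs", "subvolume", "snapshot",
		imageSubvol, containerSubvol).CombinedOutput()
	if err != nil {
		return fmt.Errorf("snapshot failed: %v (%s)", err, out)
	}

	return nil
}

func main() {
	if len(os.Args) != 3 {
		fmt.Fprintln(os.Stderr, "usage: cfi <image-subvol> <container-subvol>")
		os.Exit(1)
	}

	err := containerFromImage(os.Args[1], os.Args[2])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}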
-
-func (s *storageBtrfs) ContainerDelete(container instance.Instance) error {
-	logger.Debugf("Deleting BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	// The storage pool needs to be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Delete the subvolume.
-	containerSubvolumeName := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	if shared.PathExists(containerSubvolumeName) && isBtrfsSubVolume(containerSubvolumeName) {
-		err = btrfsSubVolumesDelete(containerSubvolumeName)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Delete the container's symlink to the subvolume.
-	err = deleteContainerMountpoint(containerSubvolumeName, container.Path(), s.GetStorageTypeName())
-	if err != nil {
-		return err
-	}
-
-	// Delete potential snapshot mountpoints.
-	snapshotMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, container.Name())
-	if shared.PathExists(snapshotMntPoint) {
-		err := os.RemoveAll(snapshotMntPoint)
-		if err != nil && !os.IsNotExist(err) {
-			return err
-		}
-	}
-
-	// Delete potential symlink
-	// ${LXD_DIR}/snapshots/<container_name> to ${POOL}/snapshots/<container_name>
-	snapshotSymlink := shared.VarPath("snapshots", project.Prefix(container.Project(), container.Name()))
-	if shared.PathExists(snapshotSymlink) {
-		err := os.Remove(snapshotSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Deleted BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) copyContainer(target instance.Instance, source instance.Instance) error {
-	sourceContainerSubvolumeName := driver.GetContainerMountPoint(source.Project(), s.pool.Name, source.Name())
-	if source.IsSnapshot() {
-		sourceContainerSubvolumeName = driver.GetSnapshotMountPoint(source.Project(), s.pool.Name, source.Name())
-	}
-	targetContainerSubvolumeName := driver.GetContainerMountPoint(target.Project(), s.pool.Name, target.Name())
-
-	containersPath := driver.GetContainerMountPoint("default", s.pool.Name, "")
-	// Ensure that the directories immediately preceding the subvolume directory exist.
-	if !shared.PathExists(containersPath) {
-		err := os.MkdirAll(containersPath, driver.ContainersDirMode)
-		if err != nil {
-			return err
-		}
-	}
-
-	err := s.btrfsPoolVolumesSnapshot(sourceContainerSubvolumeName, targetContainerSubvolumeName, false, true)
-	if err != nil {
-		return err
-	}
-
-	err = driver.CreateContainerMountpoint(targetContainerSubvolumeName, target.Path(), target.IsPrivileged())
-	if err != nil {
-		return err
-	}
-
-	err = target.DeferTemplateApply("copy")
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) copySnapshot(target instance.Instance, source instance.Instance) error {
-	sourceName := source.Name()
-	targetName := target.Name()
-	sourceContainerSubvolumeName := driver.GetSnapshotMountPoint(source.Project(), s.pool.Name, sourceName)
-	targetContainerSubvolumeName := driver.GetSnapshotMountPoint(target.Project(), s.pool.Name, targetName)
-
-	targetParentName, _, _ := shared.InstanceGetParentAndSnapshotName(target.Name())
-	containersPath := driver.GetSnapshotMountPoint(target.Project(), s.pool.Name, targetParentName)
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(target.Project(), targetParentName))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(target.Project(), targetParentName))
-	err := driver.CreateSnapshotMountpoint(containersPath, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-	if err != nil {
-		return err
-	}
-
-	// Ensure that the directories immediately preceding the subvolume directory exist.
-	if !shared.PathExists(containersPath) {
-		err := os.MkdirAll(containersPath, driver.ContainersDirMode)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = s.btrfsPoolVolumesSnapshot(sourceContainerSubvolumeName, targetContainerSubvolumeName, true, true)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) doCrossPoolContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
-	sourcePool, err := source.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	// setup storage for the source volume
-	srcStorage, err := storagePoolVolumeInit(s.s, "default", sourcePool, source.Name(), storagePoolVolumeTypeContainer)
-	if err != nil {
-		return err
-	}
-
-	ourMount, err := srcStorage.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer srcStorage.StoragePoolUmount()
-	}
-
-	targetPool, err := target.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	var snapshots []instance.Instance
-
-	if refresh {
-		snapshots = refreshSnapshots
-	} else {
-		snapshots, err = source.Snapshots()
-		if err != nil {
-			return err
-		}
-
-		// create the main container
-		err = s.doContainerCreate(target.Project(), target.Name(), target.IsPrivileged())
-		if err != nil {
-			return err
-		}
-	}
-
-	destContainerMntPoint := driver.GetContainerMountPoint(target.Project(), targetPool, target.Name())
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	if !containerOnly {
-		for _, snap := range snapshots {
-			srcSnapshotMntPoint := driver.GetSnapshotMountPoint(target.Project(), sourcePool, snap.Name())
-			_, err = rsync.LocalCopy(srcSnapshotMntPoint, destContainerMntPoint, bwlimit, true)
-			if err != nil {
-				logger.Errorf("Failed to rsync into BTRFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-				return err
-			}
-
-			// create snapshot
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-			err = s.doContainerSnapshotCreate(target.Project(), fmt.Sprintf("%s/%s", target.Name(), snapOnlyName), target.Name())
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	srcContainerMntPoint := driver.GetContainerMountPoint(source.Project(), sourcePool, source.Name())
-	_, err = rsync.LocalCopy(srcContainerMntPoint, destContainerMntPoint, bwlimit, true)
-	if err != nil {
-		logger.Errorf("Failed to rsync into BTRFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
-	logger.Debugf("Copying BTRFS container storage %s to %s", source.Name(), target.Name())
-
-	// The storage pool needs to be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	ourStart, err := source.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer source.StorageStop()
-	}
-
-	if target.Type() != instancetype.Container {
-		return fmt.Errorf("Target Instance type must be container")
-	}
-
-	if source.Type() != instancetype.Container {
-		return fmt.Errorf("Source Instance type must be container")
-	}
-
-	targetCt := target.(*containerLXC)
-	srcCt := source.(*containerLXC)
-
-	_, sourcePool, _ := srcCt.Storage().GetContainerPoolInfo()
-	_, targetPool, _ := targetCt.Storage().GetContainerPoolInfo()
-	if sourcePool != targetPool {
-		err = s.doCrossPoolContainerCopy(target, source, containerOnly, false, nil)
-		if err != nil {
-			return err
-		}
-
-		return target.DeferTemplateApply("copy")
-	}
-
-	err = s.copyContainer(target, source)
-	if err != nil {
-		return err
-	}
-
-	if containerOnly {
-		logger.Debugf("Copied BTRFS container storage %s to %s", source.Name(), target.Name())
-		return nil
-	}
-
-	snapshots, err := source.Snapshots()
-	if err != nil {
-		return err
-	}
-
-	if len(snapshots) == 0 {
-		logger.Debugf("Copied BTRFS container storage %s to %s", source.Name(), target.Name())
-		return nil
-	}
-
-	for _, snap := range snapshots {
-		sourceSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), snap.Name())
-		if err != nil {
-			return err
-		}
-
-		_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-		newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
-		targetSnapshot, err := instance.LoadByProjectAndName(s.s, target.Project(), newSnapName)
-		if err != nil {
-			return err
-		}
-
-		err = s.copySnapshot(targetSnapshot, sourceSnapshot)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Copied BTRFS container storage %s to %s", source.Name(), target.Name())
-	return nil
-}
-
-func (s *storageBtrfs) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
-	logger.Debugf("Refreshing BTRFS container storage for %s from %s", target.Name(), source.Name())
-
-	// The storage pool needs to be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	ourStart, err := source.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer source.StorageStop()
-	}
-
-	return s.doCrossPoolContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
-}
-
-func (s *storageBtrfs) ContainerMount(c instance.Instance) (bool, error) {
-	logger.Debugf("Mounting BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	// The storage pool must be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Mounted BTRFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageBtrfs) ContainerUmount(c instance.Instance, path string) (bool, error) {
-	return true, nil
-}
-
-func (s *storageBtrfs) ContainerRename(container instance.Instance, newName string) error {
-	logger.Debugf("Renaming BTRFS storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-
-	// The storage pool must be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	oldContainerSubvolumeName := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	newContainerSubvolumeName := driver.GetContainerMountPoint(container.Project(), s.pool.Name, newName)
-	err = os.Rename(oldContainerSubvolumeName, newContainerSubvolumeName)
-	if err != nil {
-		return err
-	}
-
-	newSymlink := shared.VarPath("containers", project.Prefix(container.Project(), newName))
-	err = renameContainerMountpoint(oldContainerSubvolumeName, container.Path(), newContainerSubvolumeName, newSymlink)
-	if err != nil {
-		return err
-	}
-
-	oldSnapshotSubvolumeName := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, container.Name())
-	newSnapshotSubvolumeName := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, newName)
-	if shared.PathExists(oldSnapshotSubvolumeName) {
-		err = os.Rename(oldSnapshotSubvolumeName, newSnapshotSubvolumeName)
-		if err != nil {
-			return err
-		}
-	}
-
-	oldSnapshotSymlink := shared.VarPath("snapshots", project.Prefix(container.Project(), container.Name()))
-	newSnapshotSymlink := shared.VarPath("snapshots", project.Prefix(container.Project(), newName))
-	if shared.PathExists(oldSnapshotSymlink) {
-		err := os.Remove(oldSnapshotSymlink)
-		if err != nil {
-			return err
-		}
-
-		err = os.Symlink(newSnapshotSubvolumeName, newSnapshotSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Renamed BTRFS storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-	return nil
-}
-
-func (s *storageBtrfs) ContainerRestore(container instance.Instance, sourceContainer instance.Instance) error {
-	logger.Debugf("Restoring BTRFS storage volume for container \"%s\" from %s to %s", s.volume.Name, sourceContainer.Name(), container.Name())
-
-	// The storage pool must be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Create a backup so we can revert.
-	targetContainerSubvolumeName := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	backupTargetContainerSubvolumeName := fmt.Sprintf("%s.tmp", targetContainerSubvolumeName)
-	err = os.Rename(targetContainerSubvolumeName, backupTargetContainerSubvolumeName)
-	if err != nil {
-		return err
-	}
-	undo := true
-	defer func() {
-		if undo {
-			os.Rename(backupTargetContainerSubvolumeName, targetContainerSubvolumeName)
-		}
-	}()
-
-	ourStart, err := sourceContainer.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer sourceContainer.StorageStop()
-	}
-
-	// Mount the source container.
-	if sourceContainer.Type() != instancetype.Container {
-		return fmt.Errorf("Instance type must be container")
-	}
-
-	ct := sourceContainer.(*containerLXC)
-
-	srcContainerStorage := ct.Storage()
-	_, sourcePool, _ := srcContainerStorage.GetContainerPoolInfo()
-	sourceContainerSubvolumeName := ""
-	if sourceContainer.IsSnapshot() {
-		sourceContainerSubvolumeName = driver.GetSnapshotMountPoint(sourceContainer.Project(), sourcePool, sourceContainer.Name())
-	} else {
-		sourceContainerSubvolumeName = driver.GetContainerMountPoint(container.Project(), sourcePool, sourceContainer.Name())
-	}
-
-	var failure error
-	_, targetPool, _ := s.GetContainerPoolInfo()
-	if targetPool == sourcePool {
-		// They are on the same storage pool, so we can simply snapshot.
-		err := s.btrfsPoolVolumesSnapshot(sourceContainerSubvolumeName, targetContainerSubvolumeName, false, true)
-		if err != nil {
-			failure = err
-		}
-	} else {
-		err := btrfsSubVolumeCreate(targetContainerSubvolumeName)
-		if err == nil {
-			// Use rsync to fill the empty volume, syncing by
-			// subvolume name.
-			bwlimit := s.pool.Config["rsync.bwlimit"]
-			output, err := rsync.LocalCopy(sourceContainerSubvolumeName, targetContainerSubvolumeName, bwlimit, true)
-			if err != nil {
-				s.ContainerDelete(container)
-				logger.Errorf("ContainerRestore: rsync failed: %s", string(output))
-				failure = err
-			}
-		} else {
-			failure = err
-		}
-	}
-
-	if failure == nil {
-		undo = false
-		_, sourcePool, _ := srcContainerStorage.GetContainerPoolInfo()
-		_, targetPool, _ := s.GetContainerPoolInfo()
-		if targetPool == sourcePool {
-			// Remove the backup we made.
-			return btrfsSubVolumesDelete(backupTargetContainerSubvolumeName)
-		}
-
-		err = os.RemoveAll(backupTargetContainerSubvolumeName)
-		if err != nil && !os.IsNotExist(err) {
-			return err
-		}
-	}
-
-	logger.Debugf("Restored BTRFS storage volume for container \"%s\" from %s to %s", s.volume.Name, sourceContainer.Name(), container.Name())
-	return failure
-}
-
-func (s *storageBtrfs) ContainerGetUsage(container instance.Instance) (int64, error) {
-	return s.btrfsPoolVolumeQGroupUsage(container.Path())
-}
-
-func (s *storageBtrfs) doContainerSnapshotCreate(projectName string, targetName string, sourceName string) error {
-	logger.Debugf("Creating BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// We can only create the btrfs subvolume under the mounted storage
-	// pool. The on-disk layout for snapshots on a btrfs storage pool will
-	// thus be
-	// ${LXD_DIR}/storage-pools/<pool>/containers-snapshots/. The btrfs tool
-	// will complain if the intermediate path does not exist, so create it
-	// if it doesn't already exist.
-	snapshotSubvolumePath := getSnapshotSubvolumePath(projectName, s.pool.Name, sourceName)
-	if !shared.PathExists(snapshotSubvolumePath) {
-		err := os.MkdirAll(snapshotSubvolumePath, driver.ContainersDirMode)
-		if err != nil {
-			return err
-		}
-	}
-
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(projectName, s.volume.Name))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceName))
-	if !shared.PathExists(snapshotMntPointSymlink) {
-		if !shared.PathExists(snapshotMntPointSymlinkTarget) {
-			err = os.MkdirAll(snapshotMntPointSymlinkTarget, driver.SnapshotsDirMode)
-			if err != nil {
-				return err
-			}
-		}
-
-		err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	srcContainerSubvolumeName := driver.GetContainerMountPoint(projectName, s.pool.Name, sourceName)
-	snapshotSubvolumeName := driver.GetSnapshotMountPoint(projectName, s.pool.Name, targetName)
-	err = s.btrfsPoolVolumesSnapshot(srcContainerSubvolumeName, snapshotSubvolumeName, true, true)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Created BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
-	err := s.doContainerSnapshotCreate(sourceContainer.Project(), snapshotContainer.Name(), sourceContainer.Name())
-	if err != nil {
-		s.ContainerSnapshotDelete(snapshotContainer)
-		return err
-	}
-
-	return nil
-}
-
-func btrfsSnapshotDeleteInternal(projectName, poolName string, snapshotName string) error {
-	snapshotSubvolumeName := driver.GetSnapshotMountPoint(projectName, poolName, snapshotName)
-	// Also delete any leftover .ro snapshot.
-	roSnapshotSubvolumeName := fmt.Sprintf("%s.ro", snapshotSubvolumeName)
-	names := []string{snapshotSubvolumeName, roSnapshotSubvolumeName}
-	for _, name := range names {
-		if shared.PathExists(name) && isBtrfsSubVolume(name) {
-			err := btrfsSubVolumesDelete(name)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	sourceSnapshotMntPoint := shared.VarPath("snapshots", project.Prefix(projectName, snapshotName))
-	os.Remove(sourceSnapshotMntPoint)
-	os.Remove(snapshotSubvolumeName)
-
-	sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(snapshotName)
-	snapshotSubvolumePath := getSnapshotSubvolumePath(projectName, poolName, sourceName)
-	os.Remove(snapshotSubvolumePath)
-	if !shared.PathExists(snapshotSubvolumePath) {
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceName))
-		os.Remove(snapshotMntPointSymlink)
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
-	logger.Debugf("Deleting BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	err = btrfsSnapshotDeleteInternal(snapshotContainer.Project(), s.pool.Name, snapshotContainer.Name())
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Deleted BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) ContainerSnapshotStart(container instance.Instance) (bool, error) {
-	logger.Debugf("Initializing BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return false, err
-	}
-
-	snapshotSubvolumeName := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, container.Name())
-	roSnapshotSubvolumeName := fmt.Sprintf("%s.ro", snapshotSubvolumeName)
-	if shared.PathExists(roSnapshotSubvolumeName) {
-		logger.Debugf("The BTRFS snapshot is already mounted read-write")
-		return false, nil
-	}
-
-	err = os.Rename(snapshotSubvolumeName, roSnapshotSubvolumeName)
-	if err != nil {
-		return false, err
-	}
-
-	err = s.btrfsPoolVolumesSnapshot(roSnapshotSubvolumeName, snapshotSubvolumeName, false, true)
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Initialized BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageBtrfs) ContainerSnapshotStop(container instance.Instance) (bool, error) {
-	logger.Debugf("Stopping BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return false, err
-	}
-
-	snapshotSubvolumeName := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, container.Name())
-	roSnapshotSubvolumeName := fmt.Sprintf("%s.ro", snapshotSubvolumeName)
-	if !shared.PathExists(roSnapshotSubvolumeName) {
-		logger.Debugf("The BTRFS snapshot is currently not mounted read-write")
-		return false, nil
-	}
-
-	if shared.PathExists(snapshotSubvolumeName) && isBtrfsSubVolume(snapshotSubvolumeName) {
-		err = btrfsSubVolumesDelete(snapshotSubvolumeName)
-		if err != nil {
-			return false, err
-		}
-	}
-
-	err = os.Rename(roSnapshotSubvolumeName, snapshotSubvolumeName)
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Stopped BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return true, nil
-}
-
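The Start/Stop pair above implements a rename-and-snapshot dance: the pristine read-only snapshot is parked at "<name>.ro" while a writable snapshot takes its place under the original name, and stopping reverses it. A minimal standalone sketch of the same idea, assuming the btrfs tool is on PATH and using hypothetical paths:

    package main

    import (
    	"os"
    	"os/exec"
    )

    // startSnapshot parks the pristine snapshot at <snap>.ro and exposes a
    // writable btrfs snapshot under the original name.
    func startSnapshot(snap string) error {
    	ro := snap + ".ro"
    	if err := os.Rename(snap, ro); err != nil {
    		return err
    	}

    	return exec.Command("btrfs", "subvolume", "snapshot", ro, snap).Run()
    }

    // stopSnapshot discards the writable copy and restores the pristine one.
    func stopSnapshot(snap string) error {
    	ro := snap + ".ro"
    	if err := exec.Command("btrfs", "subvolume", "delete", snap).Run(); err != nil {
    		return err
    	}

    	return os.Rename(ro, snap)
    }

    func main() {
    	_ = startSnapshot("/var/lib/lxd/storage-pools/pool/containers-snapshots/c1/snap0")
    	_ = stopSnapshot("/var/lib/lxd/storage-pools/pool/containers-snapshots/c1/snap0")
    }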
-// ContainerSnapshotRename renames a snapshot of a container.
-func (s *storageBtrfs) ContainerSnapshotRename(snapshotContainer instance.Instance, newName string) error {
-	logger.Debugf("Renaming BTRFS storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-
-	// The storage pool must be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Unmount the snapshot if it is mounted, otherwise we'll get EBUSY.
-	// Rename the subvolume on the storage pool.
-	oldSnapshotSubvolumeName := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, snapshotContainer.Name())
-	newSnapshotSubvolumeName := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, newName)
-	err = os.Rename(oldSnapshotSubvolumeName, newSnapshotSubvolumeName)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Renamed BTRFS storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-	return nil
-}
-
-// Needed for live migration where an empty snapshot needs to be created before
-// rsyncing into it.
-func (s *storageBtrfs) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
-	logger.Debugf("Creating empty BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	// Mount the storage pool.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Create the snapshot subvolume path on the storage pool.
-	sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(snapshotContainer.Name())
-	snapshotSubvolumePath := getSnapshotSubvolumePath(snapshotContainer.Project(), s.pool.Name, sourceName)
-	snapshotSubvolumeName := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, snapshotContainer.Name())
-	if !shared.PathExists(snapshotSubvolumePath) {
-		err := os.MkdirAll(snapshotSubvolumePath, driver.ContainersDirMode)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = btrfsSubVolumeCreate(snapshotSubvolumeName)
-	if err != nil {
-		return err
-	}
-
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(snapshotContainer.Project(), sourceName))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(snapshotContainer.Project(), sourceName))
-	if !shared.PathExists(snapshotMntPointSymlink) {
-		err := driver.CreateContainerMountpoint(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink, snapshotContainer.IsPrivileged())
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Created empty BTRFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) doBtrfsBackup(cur string, prev string, target string) error {
-	args := []string{"send"}
-	if prev != "" {
-		args = append(args, "-p", prev)
-	}
-	args = append(args, cur)
-
-	eater, err := os.OpenFile(target, os.O_RDWR|os.O_CREATE, 0644)
-	if err != nil {
-		return err
-	}
-	defer eater.Close()
-
-	btrfsSendCmd := exec.Command("btrfs", args...)
-	btrfsSendCmd.Stdout = eater
-
-	err = btrfsSendCmd.Run()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
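Because every dump after the first is produced with "btrfs send -p <previous>", the .bin files written by doBtrfsBackup form a parent chain: they only restore cleanly when received oldest-to-newest, which is what doContainerBackupLoadOptimized does further down. A minimal sketch of that replay, with hypothetical file names:

    package main

    import (
    	"fmt"
    	"os"
    	"os/exec"
    )

    // receiveChain replays parent-chained btrfs send dumps, oldest first.
    func receiveChain(dumps []string, targetDir string) error {
    	for _, dump := range dumps {
    		f, err := os.Open(dump)
    		if err != nil {
    			return err
    		}

    		cmd := exec.Command("btrfs", "receive", "-e", targetDir)
    		cmd.Stdin = f
    		out, err := cmd.CombinedOutput()
    		f.Close()
    		if err != nil {
    			return fmt.Errorf("btrfs receive failed for %s: %s", dump, string(out))
    		}
    	}

    	return nil
    }

    func main() {
    	// Oldest snapshot first, the container dump itself last.
    	dumps := []string{"snapshots/snap0.bin", "snapshots/snap1.bin", "container.bin"}
    	err := receiveChain(dumps, "/var/lib/lxd/storage-pools/pool/containers")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    }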
-func (s *storageBtrfs) doContainerBackupCreateOptimized(tmpPath string, backup backup.Backup, source instance.Instance) error {
-	// Handle snapshots
-	finalParent := ""
-	if !backup.InstanceOnly() {
-		snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
-
-		// Retrieve the snapshots
-		snapshots, err := source.Snapshots()
-		if err != nil {
-			return err
-		}
-
-		// Create the snapshot path
-		if len(snapshots) > 0 {
-			err = os.MkdirAll(snapshotsPath, 0711)
-			if err != nil {
-				return err
-			}
-		}
-
-		for i, snap := range snapshots {
-			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-
-			// Figure out previous and current subvolumes
-			prev := ""
-			if i > 0 {
-				// /var/lib/lxd/storage-pools/<pool>/containers-snapshots/<container>/<snapshot>
-				prev = driver.GetSnapshotMountPoint(source.Project(), s.pool.Name, snapshots[i-1].Name())
-			}
-			cur := driver.GetSnapshotMountPoint(source.Project(), s.pool.Name, snap.Name())
-
-			// Make a binary btrfs backup
-			target := fmt.Sprintf("%s/%s.bin", snapshotsPath, snapName)
-			err := s.doBtrfsBackup(cur, prev, target)
-			if err != nil {
-				return err
-			}
-
-			finalParent = cur
-		}
-	}
-
-	// Make a temporary copy of the container
-	sourceVolume := driver.GetContainerMountPoint(source.Project(), s.pool.Name, source.Name())
-	containersPath := driver.GetContainerMountPoint("default", s.pool.Name, "")
-	tmpContainerMntPoint, err := ioutil.TempDir(containersPath, source.Name())
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpContainerMntPoint)
-
-	err = os.Chmod(tmpContainerMntPoint, 0100)
-	if err != nil {
-		return err
-	}
-
-	targetVolume := fmt.Sprintf("%s/.backup", tmpContainerMntPoint)
-	err = s.btrfsPoolVolumesSnapshot(sourceVolume, targetVolume, true, true)
-	if err != nil {
-		return err
-	}
-	defer btrfsSubVolumesDelete(targetVolume)
-
-	// Dump the container to a file
-	fsDump := fmt.Sprintf("%s/container.bin", tmpPath)
-	err = s.doBtrfsBackup(targetVolume, finalParent, fsDump)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) doContainerBackupCreateVanilla(tmpPath string, backup backup.Backup, source instance.Instance) error {
-	// Prepare for rsync (named rsyncCopy so it doesn't shadow the rsync package)
-	rsyncCopy := func(oldPath string, newPath string, bwlimit string) error {
-		output, err := rsync.LocalCopy(oldPath, newPath, bwlimit, true)
-		if err != nil {
-			return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
-		}
-
-		return nil
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-
-	// Handle snapshots
-	if !backup.InstanceOnly() {
-		snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
-
-		// Retrieve the snapshots
-		snapshots, err := source.Snapshots()
-		if err != nil {
-			return err
-		}
-
-		// Create the snapshot path
-		if len(snapshots) > 0 {
-			err = os.MkdirAll(snapshotsPath, 0711)
-			if err != nil {
-				return err
-			}
-		}
-
-		for _, snap := range snapshots {
-			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-
-			// Mount the snapshot to a usable path
-			_, err := s.ContainerSnapshotStart(snap)
-			if err != nil {
-				return err
-			}
-
-			snapshotMntPoint := driver.GetSnapshotMountPoint(snap.Project(), s.pool.Name, snap.Name())
-			target := fmt.Sprintf("%s/%s", snapshotsPath, snapName)
-
-			// Copy the snapshot
-			err = rsyncCopy(snapshotMntPoint, target, bwlimit)
-			s.ContainerSnapshotStop(snap)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Make a temporary copy of the container
-	sourceVolume := driver.GetContainerMountPoint(source.Project(), s.pool.Name, source.Name())
-	containersPath := driver.GetContainerMountPoint("default", s.pool.Name, "")
-	tmpContainerMntPoint, err := ioutil.TempDir(containersPath, source.Name())
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpContainerMntPoint)
-
-	err = os.Chmod(tmpContainerMntPoint, 0100)
-	if err != nil {
-		return err
-	}
-
-	targetVolume := fmt.Sprintf("%s/.backup", tmpContainerMntPoint)
-	err = s.btrfsPoolVolumesSnapshot(sourceVolume, targetVolume, true, true)
-	if err != nil {
-		return err
-	}
-	defer btrfsSubVolumesDelete(targetVolume)
-
-	// Copy the container
-	containerPath := fmt.Sprintf("%s/container", tmpPath)
-	err = rsyncCopy(targetVolume, containerPath, bwlimit)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) ContainerBackupCreate(path string, backup backup.Backup, source instance.Instance) error {
-	var err error
-
-	// Generate the actual backup
-	if backup.OptimizedStorage() {
-		err = s.doContainerBackupCreateOptimized(path, backup, source)
-		if err != nil {
-			return err
-		}
-	} else {
-		err := s.doContainerBackupCreateVanilla(path, backup, source)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) doContainerBackupLoadOptimized(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	containerName, _, _ := shared.InstanceGetParentAndSnapshotName(info.Name)
-
-	containerMntPoint := driver.GetContainerMountPoint("default", s.pool.Name, "")
-	unpackDir, err := ioutil.TempDir(containerMntPoint, containerName)
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(unpackDir)
-
-	err = os.Chmod(unpackDir, 0100)
-	if err != nil {
-		return err
-	}
-
-	unpackPath := fmt.Sprintf("%s/.backup_unpack", unpackDir)
-	err = os.MkdirAll(unpackPath, 0711)
-	if err != nil {
-		return err
-	}
-
-	// Prepare tar arguments
-	args := append(tarArgs, []string{
-		"-",
-		"--strip-components=1",
-		"-C", unpackPath, "backup",
-	}...)
-
-	// Extract container
-	data.Seek(0, 0)
-	err = shared.RunCommandWithFds(data, nil, "tar", args...)
-	if err != nil {
-		logger.Errorf("Failed to untar \"%s\" into \"%s\": %s", "backup", unpackPath, err)
-		return err
-	}
-
-	for _, snapshotOnlyName := range info.Snapshots {
-		snapshotBackup := fmt.Sprintf("%s/snapshots/%s.bin", unpackPath, snapshotOnlyName)
-		feeder, err := os.Open(snapshotBackup)
-		if err != nil {
-			return err
-		}
-
-		// create mountpoint
-		snapshotMntPoint := driver.GetSnapshotMountPoint(info.Project, s.pool.Name, containerName)
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(info.Project, containerName))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(info.Project, containerName))
-		err = driver.CreateSnapshotMountpoint(snapshotMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-		if err != nil {
-			feeder.Close()
-			return err
-		}
-
-		// /var/lib/lxd/storage-pools/<pool>/containers-snapshots/<container>/
-		btrfsRecvCmd := exec.Command("btrfs", "receive", "-e", snapshotMntPoint)
-		btrfsRecvCmd.Stdin = feeder
-		msg, err := btrfsRecvCmd.CombinedOutput()
-		feeder.Close()
-		if err != nil {
-			logger.Errorf("Failed to receive contents of btrfs backup \"%s\": %s", snapshotBackup, string(msg))
-			return err
-		}
-	}
-
-	containerBackupFile := fmt.Sprintf("%s/container.bin", unpackPath)
-	feeder, err := os.Open(containerBackupFile)
-	if err != nil {
-		return err
-	}
-	defer feeder.Close()
-
-	// /var/lib/lxd/storage-pools/<pool>/containers/
-	btrfsRecvCmd := exec.Command("btrfs", "receive", "-vv", "-e", unpackDir)
-	btrfsRecvCmd.Stdin = feeder
-	msg, err := btrfsRecvCmd.CombinedOutput()
-	if err != nil {
-		logger.Errorf("Failed to receive contents of btrfs backup \"%s\": %s", containerBackupFile, string(msg))
-		return err
-	}
-	tmpContainerMntPoint := fmt.Sprintf("%s/.backup", unpackDir)
-	defer btrfsSubVolumesDelete(tmpContainerMntPoint)
-
-	containerMntPoint = driver.GetContainerMountPoint(info.Project, s.pool.Name, info.Name)
-	err = s.btrfsPoolVolumesSnapshot(tmpContainerMntPoint, containerMntPoint, false, true)
-	if err != nil {
-		logger.Errorf("Failed to create btrfs snapshot \"%s\" of \"%s\": %s", tmpContainerMntPoint, containerMntPoint, err)
-		return err
-	}
-
-	// Create mountpoints
-	err = driver.CreateContainerMountpoint(containerMntPoint, shared.VarPath("containers", project.Prefix(info.Project, info.Name)), info.Privileged)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) doContainerBackupLoadVanilla(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	// create the main container
-	err := s.doContainerCreate(info.Project, info.Name, info.Privileged)
-	if err != nil {
-		return err
-	}
-
-	containerMntPoint := driver.GetContainerMountPoint(info.Project, s.pool.Name, info.Name)
-	// Extract container
-	for _, snap := range info.Snapshots {
-		cur := fmt.Sprintf("backup/snapshots/%s", snap)
-
-		// Prepare tar arguments
-		args := append(tarArgs, []string{
-			"-",
-			"--recursive-unlink",
-			"--xattrs-include=*",
-			"--strip-components=3",
-			"-C", containerMntPoint, cur,
-		}...)
-
-		// Extract snapshots
-		data.Seek(0, 0)
-		err = shared.RunCommandWithFds(data, nil, "tar", args...)
-		if err != nil {
-			logger.Errorf("Failed to untar \"%s\" into \"%s\": %s", cur, containerMntPoint, err)
-			return err
-		}
-
-		// create snapshot
-		err = s.doContainerSnapshotCreate(info.Project, fmt.Sprintf("%s/%s", info.Name, snap), info.Name)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Prepare tar arguments
-	args := append(tarArgs, []string{
-		"-",
-		"--strip-components=2",
-		"--xattrs-include=*",
-		"-C", containerMntPoint, "backup/container",
-	}...)
-
-	// Extract container
-	data.Seek(0, 0)
-	err = shared.RunCommandWithFds(data, nil, "tar", args...)
-	if err != nil {
-		logger.Errorf("Failed to untar \"backup/container\" into \"%s\": %s", containerMntPoint, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) ContainerBackupLoad(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	logger.Debugf("Loading BTRFS storage volume for backup \"%s\" on storage pool \"%s\"", info.Name, s.pool.Name)
-
-	if info.OptimizedStorage {
-		return s.doContainerBackupLoadOptimized(info, data, tarArgs)
-	}
-
-	return s.doContainerBackupLoadVanilla(info, data, tarArgs)
-}
-
-func (s *storageBtrfs) ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	logger.Debugf("Creating BTRFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	// Create the subvolume.
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	err = s.createImageDbPoolVolume(fingerprint)
-	if err != nil {
-		return err
-	}
-
-	// We can only create the btrfs subvolume under the mounted storage
-	// pool. The on-disk layout for images on a btrfs storage pool will thus
-	// be
-	// ${LXD_DIR}/storage-pools/<pool>/images/. The btrfs tool will
-	// complain if the intermediate path does not exist, so create it if it
-	// doesn't already exist.
-	imageSubvolumePath := s.getImageSubvolumePath(s.pool.Name)
-	if !shared.PathExists(imageSubvolumePath) {
-		err := os.MkdirAll(imageSubvolumePath, driver.ImagesDirMode)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Create a temporary rw btrfs subvolume. From this rw subvolume we'll
-	// create a ro snapshot below. The path with which we do this is
-	// ${LXD_DIR}/storage-pools/<pool>/images/<fingerprint>@<pool>_tmp.
-	imageSubvolumeName := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	tmpImageSubvolumeName := fmt.Sprintf("%s_tmp", imageSubvolumeName)
-	err = btrfsSubVolumeCreate(tmpImageSubvolumeName)
-	if err != nil {
-		return err
-	}
-	// Delete volume on error.
-	undo := true
-	defer func() {
-		if undo {
-			btrfsSubVolumesDelete(tmpImageSubvolumeName)
-		}
-	}()
-
-	// Unpack the image in imageMntPoint.
-	imagePath := shared.VarPath("images", fingerprint)
-	err = driver.ImageUnpack(imagePath, tmpImageSubvolumeName, "", false, s.s.OS.RunningInUserNS, tracker)
-	if err != nil {
-		return err
-	}
-
-	// Now create a read-only snapshot of the subvolume.
-	// The path with which we do this is
-	// ${LXD_DIR}/storage-pools/<pool>/images/<fingerprint>.
-	err = s.btrfsPoolVolumesSnapshot(tmpImageSubvolumeName, imageSubvolumeName, true, true)
-	if err != nil {
-		return err
-	}
-
-	defer func() {
-		if undo {
-			btrfsSubVolumesDelete(imageSubvolumeName)
-		}
-	}()
-
-	err = btrfsSubVolumesDelete(tmpImageSubvolumeName)
-	if err != nil {
-		return err
-	}
-
-	undo = false
-
-	logger.Debugf("Created BTRFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) ImageDelete(fingerprint string) error {
-	logger.Debugf("Deleting BTRFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	// Delete the btrfs subvolume. The path with which we
-	// do this is ${LXD_DIR}/storage-pools/<pool>/images/<fingerprint>.
-	imageSubvolumeName := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if shared.PathExists(imageSubvolumeName) && isBtrfsSubVolume(imageSubvolumeName) {
-		err = btrfsSubVolumesDelete(imageSubvolumeName)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = s.deleteImageDbPoolVolume(fingerprint)
-	if err != nil {
-		return err
-	}
-
-	// Now delete the mountpoint for the image:
-	// ${LXD_DIR}/storage-pools/<pool>/images/<fingerprint>.
-	if shared.PathExists(imageSubvolumeName) {
-		err := os.RemoveAll(imageSubvolumeName)
-		if err != nil && !os.IsNotExist(err) {
-			return err
-		}
-	}
-
-	logger.Debugf("Deleted BTRFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) ImageMount(fingerprint string) (bool, error) {
-	logger.Debugf("Mounting BTRFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	// The storage pool must be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Mounted BTRFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageBtrfs) ImageUmount(fingerprint string) (bool, error) {
-	return true, nil
-}
-
-func btrfsSubVolumeCreate(subvol string) error {
-	parentDestPath := filepath.Dir(subvol)
-	if !shared.PathExists(parentDestPath) {
-		err := os.MkdirAll(parentDestPath, 0711)
-		if err != nil {
-			return err
-		}
-	}
-
-	_, err := shared.RunCommand(
-		"btrfs",
-		"subvolume",
-		"create",
-		subvol)
-	if err != nil {
-		logger.Errorf("Failed to create BTRFS subvolume \"%s\": %v", subvol, err)
-		return err
-	}
-
-	return nil
-}
-
-var btrfsErrNoQuota = fmt.Errorf("Quotas disabled on filesystem")
-var btrfsErrNoQGroup = fmt.Errorf("Unable to find quota group")
-
-func btrfsSubVolumeQGroup(subvol string) (string, error) {
-	output, err := shared.RunCommand(
-		"btrfs",
-		"qgroup",
-		"show",
-		"-e",
-		"-f",
-		subvol)
-
-	if err != nil {
-		return "", btrfsErrNoQuota
-	}
-
-	var qgroup string
-	for _, line := range strings.Split(output, "\n") {
-		if line == "" || strings.HasPrefix(line, "qgroupid") || strings.HasPrefix(line, "---") {
-			continue
-		}
-
-		fields := strings.Fields(line)
-		if len(fields) != 4 {
-			continue
-		}
-
-		qgroup = fields[0]
-	}
-
-	if qgroup == "" {
-		return "", btrfsErrNoQGroup
-	}
-
-	return qgroup, nil
-}
-
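For reference, btrfsSubVolumeQGroup expects the four-column output of "btrfs qgroup show -e -f" (qgroupid, rfer, excl, max_excl). The sample below is illustrative only; exact values and widths vary with kernel and btrfs-progs versions:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseQGroup mirrors the parsing loop above: skip the header and
    // separator, keep the qgroupid column of the last four-field row.
    func parseQGroup(output string) string {
    	var qgroup string
    	for _, line := range strings.Split(output, "\n") {
    		if line == "" || strings.HasPrefix(line, "qgroupid") || strings.HasPrefix(line, "---") {
    			continue
    		}

    		fields := strings.Fields(line)
    		if len(fields) != 4 {
    			continue
    		}

    		qgroup = fields[0]
    	}

    	return qgroup
    }

    func main() {
    	sample := "qgroupid rfer excl max_excl\n-------- ---- ---- --------\n0/257 1.17GiB 1.17GiB none"
    	fmt.Println(parseQGroup(sample)) // prints "0/257"
    }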
-func (s *storageBtrfs) btrfsPoolVolumeQGroupUsage(subvol string) (int64, error) {
-	output, err := shared.RunCommand(
-		"btrfs",
-		"qgroup",
-		"show",
-		"-e",
-		"-f",
-		subvol)
-
-	if err != nil {
-		return -1, fmt.Errorf("BTRFS quotas not supported. Try enabling them with \"btrfs quota enable\"")
-	}
-
-	for _, line := range strings.Split(output, "\n") {
-		if line == "" || strings.HasPrefix(line, "qgroupid") || strings.HasPrefix(line, "---") {
-			continue
-		}
-
-		fields := strings.Fields(line)
-		if len(fields) != 4 {
-			continue
-		}
-
-		usage, err := strconv.ParseInt(fields[2], 10, 64)
-		if err != nil {
-			continue
-		}
-
-		return usage, nil
-	}
-
-	return -1, fmt.Errorf("Unable to find current qgroup usage")
-}
-
-func btrfsSubVolumeDelete(subvol string) error {
-	// Attempt (but don't fail on) to delete any qgroup on the subvolume
-	qgroup, err := btrfsSubVolumeQGroup(subvol)
-	if err == nil {
-		shared.RunCommand(
-			"btrfs",
-			"qgroup",
-			"destroy",
-			qgroup,
-			subvol)
-	}
-
-	// Attempt to make the subvolume writable
-	shared.RunCommand("btrfs", "property", "set", subvol, "ro", "false")
-
-	// Delete the subvolume itself
-	_, err = shared.RunCommand(
-		"btrfs",
-		"subvolume",
-		"delete",
-		subvol)
-
-	return err
-}
-
-// btrfsSubVolumesDelete is the recursive variant of btrfsSubVolumeDelete:
-// it first deletes the subvolumes of the subvolume and then the
-// subvolume itself.
-func btrfsSubVolumesDelete(subvol string) error {
-	// Delete subsubvols.
-	subsubvols, err := btrfsSubVolumesGet(subvol)
-	if err != nil {
-		return err
-	}
-	sort.Sort(sort.Reverse(sort.StringSlice(subsubvols)))
-
-	for _, subsubvol := range subsubvols {
-		err := btrfsSubVolumeDelete(path.Join(subvol, subsubvol))
-		if err != nil {
-			return err
-		}
-	}
-
-	// Delete the subvol itself
-	err = btrfsSubVolumeDelete(subvol)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
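The reverse lexical sort above is what guarantees a leaf-first deletion order: a nested subvolume's relative path is always a string extension of its parent's, so reversing the sorted list removes "a/b/c" before "a/b" before "a". A tiny illustration:

    package main

    import (
    	"fmt"
    	"sort"
    )

    func main() {
    	subvols := []string{"a", "a/b", "a/b/c", "z"}
    	sort.Sort(sort.Reverse(sort.StringSlice(subvols)))
    	fmt.Println(subvols) // [z a/b/c a/b a]
    }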
-/*
- * btrfsSnapshot creates a snapshot of "source" at "dest".
- * The result will be read-only if "readonly" is true.
- */
-func btrfsSnapshot(s *state.State, source string, dest string, readonly bool) error {
-	var output string
-	var err error
-	if readonly && !s.OS.RunningInUserNS {
-		output, err = shared.RunCommand(
-			"btrfs",
-			"subvolume",
-			"snapshot",
-			"-r",
-			source,
-			dest)
-	} else {
-		output, err = shared.RunCommand(
-			"btrfs",
-			"subvolume",
-			"snapshot",
-			source,
-			dest)
-	}
-	if err != nil {
-		return fmt.Errorf(
-			"subvolume snapshot failed, source=%s, dest=%s, output=%s",
-			source,
-			dest,
-			output,
-		)
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) btrfsPoolVolumeSnapshot(source string, dest string, readonly bool) error {
-	return btrfsSnapshot(s.s, source, dest, readonly)
-}
-
-func (s *storageBtrfs) btrfsPoolVolumesSnapshot(source string, dest string, readonly bool, recursive bool) error {
-	// Now snapshot all subvolumes of the root.
-	if recursive {
-		// Get a list of subvolumes of the root
-		subsubvols, err := btrfsSubVolumesGet(source)
-		if err != nil {
-			return err
-		}
-		sort.Sort(sort.StringSlice(subsubvols))
-
-		if len(subsubvols) > 0 && readonly {
-			// A root with subvolumes can never be read-only;
-			// don't make the subvolumes read-only either.
-			readonly = false
-
-			logger.Warnf("Subvolumes detected, ignoring ro flag")
-		}
-
-		// First snapshot the root
-		err = s.btrfsPoolVolumeSnapshot(source, dest, readonly)
-		if err != nil {
-			return err
-		}
-
-		for _, subsubvol := range subsubvols {
-			// Clear the target for the subvol to use
-			os.Remove(path.Join(dest, subsubvol))
-
-			err := s.btrfsPoolVolumeSnapshot(path.Join(source, subsubvol), path.Join(dest, subsubvol), readonly)
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		err := s.btrfsPoolVolumeSnapshot(source, dest, readonly)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
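The recursion above is necessary because "btrfs subvolume snapshot" does not descend into child subvolumes: each child appears in the new snapshot as an empty stub directory, which is why the code removes the stub and snapshots the child into place. A minimal sketch under those assumptions, with hypothetical paths and a pre-sorted (parents-first) child list:

    package main

    import (
    	"os"
    	"os/exec"
    	"path"
    )

    func snapshotOne(source string, dest string) error {
    	return exec.Command("btrfs", "subvolume", "snapshot", source, dest).Run()
    }

    // snapshotRecursive snapshots the root, then each child subvolume into
    // place after dropping the empty stub directory left by the root snapshot.
    func snapshotRecursive(source string, dest string, children []string) error {
    	if err := snapshotOne(source, dest); err != nil {
    		return err
    	}

    	for _, child := range children {
    		os.Remove(path.Join(dest, child))
    		if err := snapshotOne(path.Join(source, child), path.Join(dest, child)); err != nil {
    			return err
    		}
    	}

    	return nil
    }

    func main() {
    	_ = snapshotRecursive("/pool/c1", "/pool/c1-snap", []string{"sub1", "sub1/sub2"})
    }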
-// isBtrfsSubVolume returns true if the given Path is a btrfs subvolume else
-// false.
-func isBtrfsSubVolume(subvolPath string) bool {
-	fs := unix.Stat_t{}
-	err := unix.Lstat(subvolPath, &fs)
-	if err != nil {
-		return false
-	}
-
-	// Check if BTRFS_FIRST_FREE_OBJECTID
-	if fs.Ino != 256 {
-		return false
-	}
-
-	return true
-}
-
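isBtrfsSubVolume relies on the fact that the root directory of every btrfs subvolume has inode number 256 (BTRFS_FIRST_FREE_OBJECTID), so a single Lstat is enough to tell a subvolume from an ordinary directory. A standalone usage sketch with a hypothetical path:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	st := unix.Stat_t{}
    	err := unix.Lstat("/var/lib/lxd/storage-pools/pool/containers/c1", &st)
    	if err == nil && st.Ino == 256 {
    		fmt.Println("is a btrfs subvolume")
    	}
    }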
-func isBtrfsFilesystem(path string) bool {
-	_, err := shared.RunCommand("btrfs", "filesystem", "show", path)
-	if err != nil {
-		return false
-	}
-
-	return true
-}
-
-func isOnBtrfs(path string) bool {
-	fs := unix.Statfs_t{}
-
-	err := unix.Statfs(path, &fs)
-	if err != nil {
-		return false
-	}
-
-	if fs.Type != util.FilesystemSuperMagicBtrfs {
-		return false
-	}
-
-	return true
-}
-
-func btrfsSubVolumeIsRo(path string) bool {
-	output, err := shared.RunCommand("btrfs", "property", "get", "-ts", path)
-	if err != nil {
-		return false
-	}
-
-	return strings.HasPrefix(string(output), "ro=true")
-}
-
-func btrfsSubVolumeMakeRo(path string) error {
-	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "true")
-	return err
-}
-
-func btrfsSubVolumeMakeRw(path string) error {
-	_, err := shared.RunCommand("btrfs", "property", "set", "-ts", path, "ro", "false")
-	return err
-}
-
-func btrfsSubVolumesGet(path string) ([]string, error) {
-	result := []string{}
-
-	if !strings.HasSuffix(path, "/") {
-		path = path + "/"
-	}
-
-	// Unprivileged users can't get to fs internals
-	filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error {
-		// Skip walk errors
-		if err != nil {
-			return nil
-		}
-
-		// Ignore the base path
-		if strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/") {
-			return nil
-		}
-
-		// Subvolumes can only be directories
-		if !fi.IsDir() {
-			return nil
-		}
-
-		// Check if a btrfs subvolume
-		if isBtrfsSubVolume(fpath) {
-			result = append(result, strings.TrimPrefix(fpath, path))
-		}
-
-		return nil
-	})
-
-	return result, nil
-}
-
-func (s *storageBtrfs) MigrationType() migration.MigrationFSType {
-	if s.s.OS.RunningInUserNS {
-		return migration.MigrationFSType_RSYNC
-	}
-
-	return migration.MigrationFSType_BTRFS
-}
-
-func (s *storageBtrfs) PreservesInodes() bool {
-	if s.s.OS.RunningInUserNS {
-		return false
-	}
-
-	return true
-}
-
-func (s *storageBtrfs) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	if s.s.OS.RunningInUserNS {
-		return rsyncMigrationSource(args)
-	}
-
-	/* List all the snapshots in order of reverse creation. The idea here
-	 * is that we send the oldest to newest snapshot, hopefully saving on
-	 * xfer costs. Then, after all that, we send the container itself.
-	 */
-	var err error
-	var snapshots = []instance.Instance{}
-	if !args.InstanceOnly {
-		snapshots, err = args.Instance.Snapshots()
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	sourceDriver := &btrfsMigrationSourceDriver{
-		container:          args.Instance,
-		snapshots:          snapshots,
-		btrfsSnapshotNames: []string{},
-		btrfs:              s,
-	}
-
-	if !args.InstanceOnly {
-		for _, snap := range snapshots {
-			btrfsPath := driver.GetSnapshotMountPoint(snap.Project(), s.pool.Name, snap.Name())
-			sourceDriver.btrfsSnapshotNames = append(sourceDriver.btrfsSnapshotNames, btrfsPath)
-		}
-	}
-
-	return sourceDriver, nil
-}
-
-func (s *storageBtrfs) MigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	if s.s.OS.RunningInUserNS {
-		return rsyncMigrationSink(conn, op, args)
-	}
-
-	btrfsRecv := func(snapName string, btrfsPath string, targetPath string, isSnapshot bool, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
-		args := []string{"receive", "-e", btrfsPath}
-		cmd := exec.Command("btrfs", args...)
-
-		// Remove the existing pre-created subvolume
-		err := btrfsSubVolumesDelete(targetPath)
-		if err != nil {
-			logger.Errorf("Failed to delete pre-created BTRFS subvolume: %s: %v", btrfsPath, err)
-			return err
-		}
-
-		stdin, err := cmd.StdinPipe()
-		if err != nil {
-			return err
-		}
-
-		stderr, err := cmd.StderrPipe()
-		if err != nil {
-			return err
-		}
-
-		err = cmd.Start()
-		if err != nil {
-			return err
-		}
-
-		writePipe := io.WriteCloser(stdin)
-		if writeWrapper != nil {
-			writePipe = writeWrapper(stdin)
-		}
-
-		<-shared.WebsocketRecvStream(writePipe, conn)
-
-		output, err := ioutil.ReadAll(stderr)
-		if err != nil {
-			logger.Debugf("Problem reading btrfs receive stderr %s", err)
-		}
-
-		err = cmd.Wait()
-		if err != nil {
-			logger.Errorf("Problem with btrfs receive: %s", string(output))
-			return err
-		}
-
-		receivedSnapshot := fmt.Sprintf("%s/.migration-send", btrfsPath)
-		// Handle older LXD versions.
-		if !shared.PathExists(receivedSnapshot) {
-			receivedSnapshot = fmt.Sprintf("%s/.root", btrfsPath)
-		}
-		if isSnapshot {
-			receivedSnapshot = fmt.Sprintf("%s/%s", btrfsPath, snapName)
-			err = s.btrfsPoolVolumesSnapshot(receivedSnapshot, targetPath, true, true)
-		} else {
-			err = s.btrfsPoolVolumesSnapshot(receivedSnapshot, targetPath, false, true)
-		}
-		if err != nil {
-			logger.Errorf("Problem with btrfs snapshot: %s", err)
-			return err
-		}
-
-		err = btrfsSubVolumesDelete(receivedSnapshot)
-		if err != nil {
-			logger.Errorf("Failed to delete BTRFS subvolume \"%s\": %s", btrfsPath, err)
-			return err
-		}
-
-		return nil
-	}
-
-	instanceName := args.Instance.Name()
-
-	if args.Instance.Type() != instancetype.Container {
-		return fmt.Errorf("Instance type must be container")
-	}
-
-	ct := args.Instance.(*containerLXC)
-
-	_, instancePool, _ := ct.Storage().GetContainerPoolInfo()
-	containersPath := driver.GetSnapshotMountPoint(args.Instance.Project(), instancePool, instanceName)
-	if !args.InstanceOnly && len(args.Snapshots) > 0 {
-		err := os.MkdirAll(containersPath, driver.ContainersDirMode)
-		if err != nil {
-			return err
-		}
-
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", instancePool, "containers-snapshots", project.Prefix(args.Instance.Project(), instanceName))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Instance.Project(), instanceName))
-		if !shared.PathExists(snapshotMntPointSymlink) {
-			err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// At this point we have already figured out the parent
-	// instance's root disk device so we can simply
-	// retrieve it from the expanded devices.
-	parentStoragePool := ""
-	parentExpandedDevices := args.Instance.ExpandedDevices()
-	parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative())
-	if parentLocalRootDiskDeviceKey != "" {
-		parentStoragePool = parentLocalRootDiskDevice["pool"]
-	}
-
-	// A little neuroticism.
-	if parentStoragePool == "" {
-		return fmt.Errorf("Detected that the container's root device is missing the pool property during BTRFS migration")
-	}
-
-	if !args.InstanceOnly {
-		for _, snap := range args.Snapshots {
-			ctArgs := snapshotProtobufToInstanceArgs(args.Instance.Project(), instanceName, snap)
-
-			// Ensure that snapshot and parent container have the
-			// same storage pool in their local root disk device.
-			// If the root disk device for the snapshot comes from a
-			// profile on the new instance as well we don't need to
-			// do anything.
-			if ctArgs.Devices != nil {
-				snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(ctArgs.Devices.CloneNative())
-				if snapLocalRootDiskDeviceKey != "" {
-					ctArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
-				}
-			}
-
-			snapshotMntPoint := driver.GetSnapshotMountPoint(args.Instance.Project(), instancePool, ctArgs.Name)
-			_, err := containerCreateEmptySnapshot(args.Instance.DaemonState(), ctArgs)
-			if err != nil {
-				return err
-			}
-
-			snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(args.Instance.Project(), instanceName))
-			snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Instance.Project(), instanceName))
-			err = driver.CreateSnapshotMountpoint(snapshotMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-			if err != nil {
-				return err
-			}
-
-			tmpSnapshotMntPoint, err := ioutil.TempDir(containersPath, project.Prefix(args.Instance.Project(), instanceName))
-			if err != nil {
-				return err
-			}
-			defer os.RemoveAll(tmpSnapshotMntPoint)
-
-			err = os.Chmod(tmpSnapshotMntPoint, 0100)
-			if err != nil {
-				return err
-			}
-
-			wrapper := migration.ProgressWriter(op, "fs_progress", *snap.Name)
-			err = btrfsRecv(*(snap.Name), tmpSnapshotMntPoint, snapshotMntPoint, true, wrapper)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	/* finally, do the real instance */
-	containersMntPoint := driver.GetContainerMountPoint("default", s.pool.Name, "")
-	tmpContainerMntPoint, err := ioutil.TempDir(containersMntPoint, project.Prefix(args.Instance.Project(), instanceName))
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpContainerMntPoint)
-
-	err = os.Chmod(tmpContainerMntPoint, 0100)
-	if err != nil {
-		return err
-	}
-
-	wrapper := migration.ProgressWriter(op, "fs_progress", instanceName)
-	containerMntPoint := driver.GetContainerMountPoint(args.Instance.Project(), s.pool.Name, instanceName)
-	err = btrfsRecv("", tmpContainerMntPoint, containerMntPoint, false, wrapper)
-	if err != nil {
-		return err
-	}
-
-	if args.Live {
-		err = btrfsRecv("", tmpContainerMntPoint, containerMntPoint, false, wrapper)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) btrfsLookupFsUUID(fs string) (string, error) {
-	output, err := shared.RunCommand(
-		"btrfs",
-		"filesystem",
-		"show",
-		"--raw",
-		fs)
-	if err != nil {
-		return "", fmt.Errorf("failed to detect UUID")
-	}
-
-	outputString := output
-	idx := strings.Index(outputString, "uuid: ")
-	outputString = outputString[idx+6:]
-	outputString = strings.TrimSpace(outputString)
-	idx = strings.Index(outputString, "\t")
-	outputString = outputString[:idx]
-	outputString = strings.Trim(outputString, "\n")
-
-	return outputString, nil
-}
-
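btrfsLookupFsUUID does plain string surgery on "btrfs filesystem show --raw" output. The header line below is representative only; the extraction steps are the same as above:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Illustrative output: "Label: none  uuid: <uuid>\n\tTotal devices ..."
    	output := "Label: none  uuid: 22b4b2f2-1111-2222-3333-444455556666\n\tTotal devices 1 FS bytes used 1073741824\n"

    	idx := strings.Index(output, "uuid: ")
    	out := strings.TrimSpace(output[idx+6:])
    	out = out[:strings.Index(out, "\t")]
    	fmt.Println(strings.Trim(out, "\n")) // 22b4b2f2-1111-2222-3333-444455556666
    }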
-func (s *storageBtrfs) StorageEntitySetQuota(volumeType int, size int64, data interface{}) error {
-	logger.Debugf(`Setting BTRFS quota for "%s"`, s.volume.Name)
-
-	var c instance.Instance
-	var subvol string
-	switch volumeType {
-	case storagePoolVolumeTypeContainer:
-		c = data.(instance.Instance)
-		subvol = driver.GetContainerMountPoint(c.Project(), s.pool.Name, c.Name())
-	case storagePoolVolumeTypeCustom:
-		subvol = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	}
-
-	qgroup, err := btrfsSubVolumeQGroup(subvol)
-	if err != nil && !s.s.OS.RunningInUserNS {
-		var output string
-
-		if err == btrfsErrNoQuota {
-			// Enable quotas
-			poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-
-			_, err = shared.RunCommand("btrfs", "quota", "enable", poolMntPoint)
-			if err != nil {
-				return fmt.Errorf("Failed to enable quotas on BTRFS pool: %v", err)
-			}
-
-			// Retry
-			qgroup, err = btrfsSubVolumeQGroup(subvol)
-		}
-
-		if err == btrfsErrNoQGroup {
-			// Find the volume ID
-			_, err = shared.RunCommand("btrfs", "subvolume", "show", subvol)
-			if err != nil {
-				return fmt.Errorf("Failed to get subvol information: %v", err)
-			}
-
-			id := ""
-			for _, line := range strings.Split(output, "\n") {
-				line = strings.TrimSpace(line)
-				if strings.HasPrefix(line, "Subvolume ID:") {
-					fields := strings.Split(line, ":")
-					id = strings.TrimSpace(fields[len(fields)-1])
-				}
-			}
-
-			if id == "" {
-				return fmt.Errorf("Failed to find subvolume id")
-			}
-
-			// Create qgroup
-			_, err = shared.RunCommand("btrfs", "qgroup", "create", fmt.Sprintf("0/%s", id), subvol)
-			if err != nil {
-				return fmt.Errorf("Failed to create missing qgroup: %v", err)
-			}
-
-			// Retry
-			qgroup, err = btrfsSubVolumeQGroup(subvol)
-		}
-
-		if err != nil {
-			return err
-		}
-	}
-
-	// Attempt to make the subvolume writable
-	shared.RunCommand("btrfs", "property", "set", subvol, "ro", "false")
-	if size > 0 {
-		_, err := shared.RunCommand(
-			"btrfs",
-			"qgroup",
-			"limit",
-			"-e", fmt.Sprintf("%d", size),
-			subvol)
-
-		if err != nil {
-			return fmt.Errorf("Failed to set btrfs quota: %v", err)
-		}
-	} else if qgroup != "" {
-		_, err := shared.RunCommand(
-			"btrfs",
-			"qgroup",
-			"destroy",
-			qgroup,
-			subvol)
-
-		if err != nil {
-			return fmt.Errorf("Failed to set btrfs quota: %v", err)
-		}
-	}
-
-	logger.Debugf(`Set BTRFS quota for "%s"`, s.volume.Name)
-	return nil
-}
-
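Stripped of the qgroup discovery above, the two quota paths reduce to two btrfs commands: "qgroup limit -e <bytes>" to cap exclusive usage, and "qgroup destroy" to drop the quota when the size is zero. A sketch with a hypothetical qgroup id and subvolume path:

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    // setQuota caps exclusive usage of subvol, or removes the quota when size
    // is zero and a qgroup is known.
    func setQuota(subvol string, size int64, qgroup string) error {
    	if size > 0 {
    		return exec.Command("btrfs", "qgroup", "limit", "-e", fmt.Sprintf("%d", size), subvol).Run()
    	}

    	if qgroup != "" {
    		return exec.Command("btrfs", "qgroup", "destroy", qgroup, subvol).Run()
    	}

    	return nil
    }

    func main() {
    	_ = setQuota("/var/lib/lxd/storage-pools/pool/custom/vol1", 10*1024*1024*1024, "0/257")
    }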
-func (s *storageBtrfs) StoragePoolResources() (*api.ResourcesStoragePool, error) {
-	ourMount, err := s.StoragePoolMount()
-	if err != nil {
-		return nil, err
-	}
-	if ourMount {
-		defer s.StoragePoolUmount()
-	}
-
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-
-	// Inode allocation is dynamic so no use in reporting them.
-
-	return driver.GetStorageResource(poolMntPoint)
-}
-
-func (s *storageBtrfs) StoragePoolVolumeCopy(source *api.StorageVolumeSource) error {
-	logger.Infof("Copying BTRFS storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-	successMsg := fmt.Sprintf("Copied BTRFS storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-
-	// The storage pool needs to be mounted.
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-
-	if s.pool.Name != source.Pool {
-		return s.doCrossPoolVolumeCopy(source.Pool, source.Name, source.VolumeOnly)
-	}
-
-	err = s.copyVolume(source.Pool, source.Name, s.volume.Name, source.VolumeOnly)
-	if err != nil {
-		logger.Errorf("Failed to create BTRFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	if source.VolumeOnly {
-		logger.Infof(successMsg)
-		return nil
-	}
-
-	subvols, err := btrfsSubVolumesGet(s.getCustomSnapshotSubvolumePath(source.Pool))
-	if err != nil {
-		return err
-	}
-
-	for _, snapOnlyName := range subvols {
-		snap := fmt.Sprintf("%s/%s", source.Name, snapOnlyName)
-
-		err := s.copyVolume(source.Pool, snap, fmt.Sprintf("%s/%s", s.volume.Name, snapOnlyName), false)
-		if err != nil {
-			logger.Errorf("Failed to create BTRFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-			return err
-		}
-	}
-
-	logger.Infof(successMsg)
-	return nil
-}
-
-func (s *storageBtrfs) copyVolume(sourcePool string, sourceName string, targetName string, volumeOnly bool) error {
-	var customDir string
-	var srcMountPoint string
-	var dstMountPoint string
-
-	isSrcSnapshot := shared.IsSnapshot(sourceName)
-	isDstSnapshot := shared.IsSnapshot(targetName)
-
-	if isSrcSnapshot {
-		srcMountPoint = driver.GetStoragePoolVolumeSnapshotMountPoint(sourcePool, sourceName)
-	} else {
-		srcMountPoint = driver.GetStoragePoolVolumeMountPoint(sourcePool, sourceName)
-	}
-
-	if isDstSnapshot {
-		dstMountPoint = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, targetName)
-	} else {
-		dstMountPoint = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, targetName)
-	}
-
-	// Ensure that the directories immediately preceding the subvolume directory exist.
-	if isDstSnapshot {
-		volName, _, _ := shared.InstanceGetParentAndSnapshotName(targetName)
-		customDir = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, volName)
-	} else {
-		customDir = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, "")
-	}
-
-	if !shared.PathExists(customDir) {
-		err := os.MkdirAll(customDir, driver.CustomDirMode)
-		if err != nil {
-			logger.Errorf("Failed to create directory \"%s\" for storage volume \"%s\" on storage pool \"%s\": %s", customDir, s.volume.Name, s.pool.Name, err)
-			return err
-		}
-	}
-
-	err := s.btrfsPoolVolumesSnapshot(srcMountPoint, dstMountPoint, false, true)
-	if err != nil {
-		logger.Errorf("Failed to create BTRFS snapshot for storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) doCrossPoolVolumeCopy(sourcePool string, sourceName string, volumeOnly bool) error {
-	// Set up storage for the source volume.
-	srcStorage, err := storagePoolVolumeInit(s.s, "default", sourcePool, sourceName, storagePoolVolumeTypeCustom)
-	if err != nil {
-		return err
-	}
-
-	ourMount, err := srcStorage.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer srcStorage.StoragePoolUmount()
-	}
-
-	err = s.StoragePoolVolumeCreate()
-	if err != nil {
-		return err
-	}
-
-	destVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-
-	if !volumeOnly {
-		// Handle snapshots
-		snapshots, err := driver.VolumeSnapshotsGet(s.s, sourcePool, sourceName, storagePoolVolumeTypeCustom)
-		if err != nil {
-			return err
-		}
-
-		for _, snap := range snapshots {
-			srcSnapshotMntPoint := driver.GetStoragePoolVolumeSnapshotMountPoint(sourcePool, snap.Name)
-
-			_, err = rsync.LocalCopy(srcSnapshotMntPoint, destVolumeMntPoint, bwlimit, true)
-			if err != nil {
-				logger.Errorf("Failed to rsync into BTRFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-				return err
-			}
-
-			// create snapshot
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
-
-			err = s.doVolumeSnapshotCreate(s.pool.Name, s.volume.Name, fmt.Sprintf("%s/%s", s.volume.Name, snapOnlyName))
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	var srcVolumeMntPoint string
-
-	if shared.IsSnapshot(sourceName) {
-		// copy snapshot to volume
-		srcVolumeMntPoint = driver.GetStoragePoolVolumeSnapshotMountPoint(sourcePool, sourceName)
-	} else {
-		// copy volume to volume
-		srcVolumeMntPoint = driver.GetStoragePoolVolumeMountPoint(sourcePool, sourceName)
-	}
-
-	_, err = rsync.LocalCopy(srcVolumeMntPoint, destVolumeMntPoint, bwlimit, true)
-	if err != nil {
-		logger.Errorf("Failed to rsync into BTRFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageBtrfs) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return rsyncStorageMigrationSource(args)
-}
-
-func (s *storageBtrfs) StorageMigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	return rsyncStorageMigrationSink(conn, op, args)
-}
-
-func (s *storageBtrfs) StoragePoolVolumeSnapshotCreate(target *api.StorageVolumeSnapshotsPost) error {
-	logger.Infof("Creating BTRFS storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	err := s.doVolumeSnapshotCreate(s.pool.Name, s.volume.Name, target.Name)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Created BTRFS storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) doVolumeSnapshotCreate(sourcePool string, sourceName string, targetName string) error {
-	// Create subvolume path on the storage pool.
-	customSubvolumePath := s.getCustomSubvolumePath(s.pool.Name)
-
-	err := os.MkdirAll(customSubvolumePath, 0700)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	_, _, ok := shared.InstanceGetParentAndSnapshotName(targetName)
-	if !ok {
-		return fmt.Errorf("\"%s\" is not a snapshot name", targetName)
-	}
-
-	customSnapshotSubvolumeName := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-
-	err = os.MkdirAll(customSnapshotSubvolumeName, driver.SnapshotsDirMode)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	sourcePath := driver.GetStoragePoolVolumeMountPoint(sourcePool, sourceName)
-	targetPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, targetName)
-
-	return s.btrfsPoolVolumesSnapshot(sourcePath, targetPath, true, true)
-}
-
-func (s *storageBtrfs) StoragePoolVolumeSnapshotDelete() error {
-	logger.Infof("Deleting BTRFS storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	snapshotSubvolumeName := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	if shared.PathExists(snapshotSubvolumeName) && isBtrfsSubVolume(snapshotSubvolumeName) {
-		err := btrfsSubVolumesDelete(snapshotSubvolumeName)
-		if err != nil {
-			return err
-		}
-	}
-
-	err := os.RemoveAll(snapshotSubvolumeName)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	storageVolumeSnapshotPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, sourceName)
-	empty, err := shared.PathIsEmpty(storageVolumeSnapshotPath)
-	if err == nil && empty {
-		err := os.RemoveAll(storageVolumeSnapshotPath)
-		if err != nil && !os.IsNotExist(err) {
-			return err
-		}
-	}
-
-	err = s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		s.volume.Name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for BTRFS storage volume "%s" on storage pool "%s"`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	logger.Infof("Deleted BTRFS storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageBtrfs) StoragePoolVolumeSnapshotRename(newName string) error {
-	sourceName, _, ok := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	fullSnapshotName := fmt.Sprintf("%s%s%s", sourceName, shared.SnapshotDelimiter, newName)
-
-	logger.Infof("Renaming BTRFS storage volume on storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, fullSnapshotName)
-	if !ok {
-		return fmt.Errorf("Not a snapshot name")
-	}
-
-	oldPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	newPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, fullSnapshotName)
-
-	err := os.Rename(oldPath, newPath)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Renamed BTRFS storage volume on storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, fullSnapshotName)
-
-	return s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, fullSnapshotName, storagePoolVolumeTypeCustom, s.poolID)
-}
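The rename above stitches the full snapshot name back together from the parent volume name and the new snapshot-only name; shared.SnapshotDelimiter is "/" in LXD. As a sketch (mirroring the constant locally):

    package main

    import "fmt"

    const snapshotDelimiter = "/" // mirrors shared.SnapshotDelimiter

    func main() {
    	sourceName, newName := "vol1", "snap0"
    	fmt.Printf("%s%s%s\n", sourceName, snapshotDelimiter, newName) // vol1/snap0
    }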
diff --git a/lxd/storage_migration_btrfs.go b/lxd/storage_migration_btrfs.go
deleted file mode 100644
index 1f52f3076e..0000000000
--- a/lxd/storage_migration_btrfs.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-
-	"github.com/gorilla/websocket"
-
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/instance/instancetype"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	driver "github.com/lxc/lxd/lxd/storage"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/logger"
-)
-
-type btrfsMigrationSourceDriver struct {
-	container          instance.Instance
-	snapshots          []instance.Instance
-	btrfsSnapshotNames []string
-	btrfs              *storageBtrfs
-	runningSnapName    string
-	stoppedSnapName    string
-}
-
-func (s *btrfsMigrationSourceDriver) send(conn *websocket.Conn, btrfsPath string, btrfsParent string, readWrapper func(io.ReadCloser) io.ReadCloser) error {
-	args := []string{"send"}
-	if btrfsParent != "" {
-		args = append(args, "-p", btrfsParent)
-	}
-	args = append(args, btrfsPath)
-
-	cmd := exec.Command("btrfs", args...)
-
-	stdout, err := cmd.StdoutPipe()
-	if err != nil {
-		return err
-	}
-
-	readPipe := io.ReadCloser(stdout)
-	if readWrapper != nil {
-		readPipe = readWrapper(stdout)
-	}
-
-	stderr, err := cmd.StderrPipe()
-	if err != nil {
-		return err
-	}
-
-	err = cmd.Start()
-	if err != nil {
-		return err
-	}
-
-	<-shared.WebsocketSendStream(conn, readPipe, 4*1024*1024)
-
-	output, err := ioutil.ReadAll(stderr)
-	if err != nil {
-		logger.Errorf("Problem reading btrfs send stderr: %s", err)
-	}
-
-	err = cmd.Wait()
-	if err != nil {
-		logger.Errorf("Problem with btrfs send: %s", string(output))
-	}
-
-	return err
-}
-
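The send helper above streams "btrfs send" output straight over the migration websocket instead of spooling to disk. A simplified sketch where io.Copy stands in for the websocket streaming helper, with hypothetical paths:

    package main

    import (
    	"io"
    	"os"
    	"os/exec"
    )

    // streamSend pipes a (possibly incremental) btrfs send stream into dst.
    func streamSend(subvol string, parent string, dst io.Writer) error {
    	args := []string{"send"}
    	if parent != "" {
    		args = append(args, "-p", parent)
    	}
    	args = append(args, subvol)

    	cmd := exec.Command("btrfs", args...)
    	stdout, err := cmd.StdoutPipe()
    	if err != nil {
    		return err
    	}

    	if err := cmd.Start(); err != nil {
    		return err
    	}

    	if _, err := io.Copy(dst, stdout); err != nil {
    		return err
    	}

    	return cmd.Wait()
    }

    func main() {
    	_ = streamSend("/pool/c1/.migration-send", "", os.Stdout)
    }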
-func (s *btrfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operations.Operation, bwlimit string, containerOnly bool) error {
-	if s.container.Type() != instancetype.Container {
-		return fmt.Errorf("Instance type must be container")
-	}
-
-	ct := s.container.(*containerLXC)
-
-	_, containerPool, _ := ct.Storage().GetContainerPoolInfo()
-	containerName := s.container.Name()
-	containersPath := driver.GetContainerMountPoint("default", containerPool, "")
-	sourceName := containerName
-
-	// Deal with sending a snapshot to create a container on another LXD
-	// instance.
-	if s.container.IsSnapshot() {
-		sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(containerName)
-		snapshotsPath := driver.GetSnapshotMountPoint(s.container.Project(), containerPool, sourceName)
-		tmpContainerMntPoint, err := ioutil.TempDir(snapshotsPath, sourceName)
-		if err != nil {
-			return err
-		}
-		defer os.RemoveAll(tmpContainerMntPoint)
-
-		err = os.Chmod(tmpContainerMntPoint, 0100)
-		if err != nil {
-			return err
-		}
-
-		migrationSendSnapshot := fmt.Sprintf("%s/.migration-send", tmpContainerMntPoint)
-		snapshotMntPoint := driver.GetSnapshotMountPoint(s.container.Project(), containerPool, containerName)
-		err = s.btrfs.btrfsPoolVolumesSnapshot(snapshotMntPoint, migrationSendSnapshot, true, true)
-		if err != nil {
-			return err
-		}
-		defer btrfsSubVolumesDelete(migrationSendSnapshot)
-
-		wrapper := migration.ProgressReader(op, "fs_progress", containerName)
-		return s.send(conn, migrationSendSnapshot, "", wrapper)
-	}
-
-	if !containerOnly {
-		for i, snap := range s.snapshots {
-			prev := ""
-			if i > 0 {
-				prev = driver.GetSnapshotMountPoint(snap.Project(), containerPool, s.snapshots[i-1].Name())
-			}
-
-			snapMntPoint := driver.GetSnapshotMountPoint(snap.Project(), containerPool, snap.Name())
-			wrapper := migration.ProgressReader(op, "fs_progress", snap.Name())
-			if err := s.send(conn, snapMntPoint, prev, wrapper); err != nil {
-				return err
-			}
-		}
-	}
-
-	tmpContainerMntPoint, err := ioutil.TempDir(containersPath, containerName)
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpContainerMntPoint)
-
-	err = os.Chmod(tmpContainerMntPoint, 0100)
-	if err != nil {
-		return err
-	}
-
-	migrationSendSnapshot := fmt.Sprintf("%s/.migration-send", tmpContainerMntPoint)
-	containerMntPoint := driver.GetContainerMountPoint(s.container.Project(), containerPool, sourceName)
-	err = s.btrfs.btrfsPoolVolumesSnapshot(containerMntPoint, migrationSendSnapshot, true, true)
-	if err != nil {
-		return err
-	}
-	defer btrfsSubVolumesDelete(migrationSendSnapshot)
-
-	btrfsParent := ""
-	if len(s.btrfsSnapshotNames) > 0 {
-		btrfsParent = s.btrfsSnapshotNames[len(s.btrfsSnapshotNames)-1]
-	}
-
-	wrapper := migration.ProgressReader(op, "fs_progress", containerName)
-	return s.send(conn, migrationSendSnapshot, btrfsParent, wrapper)
-}
-
-func (s *btrfsMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn, bwlimit string) error {
-	tmpPath := driver.GetSnapshotMountPoint(s.container.Project(), s.btrfs.pool.Name,
-		fmt.Sprintf("%s/.migration-send", s.container.Name()))
-	err := os.MkdirAll(tmpPath, 0711)
-	if err != nil {
-		return err
-	}
-
-	err = os.Chmod(tmpPath, 0100)
-	if err != nil {
-		return err
-	}
-
-	s.stoppedSnapName = fmt.Sprintf("%s/.root", tmpPath)
-	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(s.container.Name())
-	containerMntPt := driver.GetContainerMountPoint(s.container.Project(), s.btrfs.pool.Name, parentName)
-	err = s.btrfs.btrfsPoolVolumesSnapshot(containerMntPt, s.stoppedSnapName, true, true)
-	if err != nil {
-		return err
-	}
-
-	return s.send(conn, s.stoppedSnapName, s.runningSnapName, nil)
-}
-
-func (s *btrfsMigrationSourceDriver) Cleanup() {
-	if s.stoppedSnapName != "" {
-		btrfsSubVolumesDelete(s.stoppedSnapName)
-	}
-
-	if s.runningSnapName != "" {
-		btrfsSubVolumesDelete(s.runningSnapName)
-	}
-}
-
-func (s *btrfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operations.Operation, bwlimit string, storage storage, volumeOnly bool) error {
-	msg := "Function not implemented"
-	logger.Errorf(msg)
-	return fmt.Errorf(msg)
-}
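
The core of the driver deleted above is the incremental send: each snapshot is streamed as a delta against its predecessor via "btrfs send -p". A minimal standalone sketch of that pattern, assuming only the btrfs CLI (function name and paths are illustrative, not part of the tree):

package main

import (
	"io"
	"os"
	"os/exec"
)

// btrfsSendDelta streams "btrfs send [-p parent] subvol" into w. With a
// parent the kernel emits only the delta against that snapshot, which is
// how the deleted driver chained one snapshot to the next.
func btrfsSendDelta(w io.Writer, subvol string, parent string) error {
	args := []string{"send"}
	if parent != "" {
		args = append(args, "-p", parent)
	}
	args = append(args, subvol)

	cmd := exec.Command("btrfs", args...)
	cmd.Stdout = w
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	// Hypothetical snapshot paths; the driver computed these from the
	// pool's mountpoint layout.
	err := btrfsSendDelta(os.Stdout, "/pool/snapshots/c1/snap1", "/pool/snapshots/c1/snap0")
	if err != nil {
		os.Exit(1)
	}
}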

From fb47b97c17b9804feb4040d6a328d530ef678beb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Wed, 8 Jan 2020 16:34:52 -0500
Subject: [PATCH 03/36] lxd/storage: Remove legacy zfs implementation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/api_internal.go            |    1 -
 lxd/container_lxc.go           |   19 +-
 lxd/main_init_interactive.go   |   14 -
 lxd/migrate_container.go       |   15 -
 lxd/migrate_storage_volumes.go |   12 -
 lxd/patches_utils.go           |  185 +-
 lxd/storage.go                 |   27 +-
 lxd/storage_migration_zfs.go   |  149 --
 lxd/storage_zfs.go             | 3342 --------------------------------
 lxd/storage_zfs_utils.go       |  839 --------
 10 files changed, 201 insertions(+), 4402 deletions(-)
 delete mode 100644 lxd/storage_migration_zfs.go
 delete mode 100644 lxd/storage_zfs.go
 delete mode 100644 lxd/storage_zfs_utils.go

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 8e6cab8f35..b41fbf4406 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -627,7 +627,6 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 						v[len("snapshot-"):])
 				}
 			}
-
 		}
 	}
 
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 7145db22a0..f44b368301 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -1863,6 +1863,19 @@ func (c *containerLXC) expandDevices(profiles []api.Profile) error {
 	return nil
 }
 
+func shiftZfsSkipper(dir string, absPath string, fi os.FileInfo) bool {
+	strippedPath := absPath
+	if dir != "" {
+		strippedPath = absPath[len(dir):]
+	}
+
+	if fi.IsDir() && strippedPath == "/.zfs/snapshot" {
+		return true
+	}
+
+	return false
+}
+
 func shiftBtrfsRootfs(path string, diskIdmap *idmap.IdmapSet, shift bool) error {
 	var err error
 	roSubvols := []string{}
@@ -1959,7 +1972,7 @@ func (c *containerLXC) startCommon() (string, []func() error, error) {
 
 		if diskIdmap != nil {
 			if storageType == "zfs" {
-				err = diskIdmap.UnshiftRootfs(c.RootfsPath(), zfsIdmapSetSkipper)
+				err = diskIdmap.UnshiftRootfs(c.RootfsPath(), shiftZfsSkipper)
 			} else if storageType == "btrfs" {
 				err = UnshiftBtrfsRootfs(c.RootfsPath(), diskIdmap)
 			} else {
@@ -1975,7 +1988,7 @@ func (c *containerLXC) startCommon() (string, []func() error, error) {
 
 		if nextIdmap != nil && !c.state.OS.Shiftfs {
 			if storageType == "zfs" {
-				err = nextIdmap.ShiftRootfs(c.RootfsPath(), zfsIdmapSetSkipper)
+				err = nextIdmap.ShiftRootfs(c.RootfsPath(), shiftZfsSkipper)
 			} else if storageType == "btrfs" {
 				err = ShiftBtrfsRootfs(c.RootfsPath(), nextIdmap)
 			} else {
@@ -4994,7 +5007,7 @@ func (c *containerLXC) Migrate(args *CriuMigrationArgs) error {
 			}
 
 			if storageType == "zfs" {
-				err = idmapset.ShiftRootfs(args.stateDir, zfsIdmapSetSkipper)
+				err = idmapset.ShiftRootfs(args.stateDir, shiftZfsSkipper)
 			} else if storageType == "btrfs" {
 				err = ShiftBtrfsRootfs(args.stateDir, idmapset)
 			} else {
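
The shiftZfsSkipper added above is handed to idmap.ShiftRootfs/UnshiftRootfs as a per-entry filter. A standalone sketch of the same predicate driven by an ordinary directory walk (the walk stands in for the idmap package's internal traversal, which is not shown in this patch):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// skipZfsControlDir mirrors shiftZfsSkipper: report true for the ZFS
// control directory so a uid/gid shift never descends into the read-only
// snapshot contents under <rootfs>/.zfs/snapshot.
func skipZfsControlDir(dir string, absPath string, fi os.FileInfo) bool {
	strippedPath := absPath
	if dir != "" {
		strippedPath = absPath[len(dir):]
	}

	return fi.IsDir() && strippedPath == "/.zfs/snapshot"
}

func main() {
	root := "/tmp/rootfs" // hypothetical container rootfs
	filepath.Walk(root, func(p string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if skipZfsControlDir(root, p, fi) {
			fmt.Println("skipping", p)
			return filepath.SkipDir
		}
		return nil
	})
}
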
diff --git a/lxd/main_init_interactive.go b/lxd/main_init_interactive.go
index 110c823913..be1a28fa1c 100644
--- a/lxd/main_init_interactive.go
+++ b/lxd/main_init_interactive.go
@@ -497,13 +497,6 @@ func (c *cmdInit) askStoragePool(config *cmdInitData, d lxd.InstanceServer, pool
 		}
 
 		if cli.AskBool(fmt.Sprintf("Create a new %s pool? (yes/no) [default=yes]: ", strings.ToUpper(pool.Driver)), "yes") {
-			if pool.Driver == "zfs" && os.Geteuid() == 0 {
-				poolVolumeExists, err := zfsPoolVolumeExists(pool.Name)
-				if err == nil && poolVolumeExists {
-					return fmt.Errorf("'%s' ZFS pool already exists", pool.Name)
-				}
-			}
-
 			if pool.Driver == "ceph" {
 				// Ask for the name of the cluster
 				pool.Config["ceph.cluster_name"] = cli.AskString("Name of the existing CEPH cluster [default=ceph]: ", "ceph", nil)
@@ -579,13 +572,6 @@ func (c *cmdInit) askStoragePool(config *cmdInitData, d lxd.InstanceServer, pool
 				question := fmt.Sprintf("Name of the existing %s pool or dataset: ", strings.ToUpper(pool.Driver))
 				pool.Config["source"] = cli.AskString(question, "", nil)
 			}
-
-			if pool.Driver == "zfs" && os.Geteuid() == 0 {
-				poolVolumeExists, err := zfsPoolVolumeExists(pool.Config["source"])
-				if err == nil && !poolVolumeExists {
-					return fmt.Errorf("'%s' ZFS pool or dataset does not exist", pool.Config["source"])
-				}
-			}
 		}
 
 		if pool.Driver == "lvm" {
diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 8af9bd8b38..39ded5915a 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -387,12 +387,6 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 				Bidirectional: &hasFeature,
 			},
 		}
-
-		if len(zfsVersion) >= 3 && zfsVersion[0:3] != "0.6" {
-			offerHeader.ZfsFeatures = &migration.ZfsFeatures{
-				Compress: &hasFeature,
-			}
-		}
 	} else {
 		return fmt.Errorf("Instance type not supported")
 	}
@@ -1037,15 +1031,6 @@ func (c *migrationSink) Do(state *state.State, migrateOp *operations.Operation)
 			}
 		}
 
-		// Return those ZFS features we know about (with the value sent by the remote).
-		if len(zfsVersion) >= 3 && zfsVersion[0:3] != "0.6" {
-			if offerHeader.ZfsFeatures != nil && offerHeader.ZfsFeatures.Compress != nil {
-				respHeader.ZfsFeatures = &migration.ZfsFeatures{
-					Compress: offerHeader.ZfsFeatures.Compress,
-				}
-			}
-		}
-
 		// If refresh mode or the storage type the source has doesn't match what we have,
 		// then we have to use rsync.
 		if c.refresh || *offerHeader.Fs != *respHeader.Fs {
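
The block deleted here gated the "compress" migration feature on the cached zfsVersion string, offering it only on releases newer than 0.6.x. The predicate in isolation, with example version strings:

package main

import "fmt"

// zfsSupportsCompressedSend reproduces the removed version gate: any
// version string at least three characters long that does not start
// with "0.6" was treated as supporting compressed send.
func zfsSupportsCompressedSend(zfsVersion string) bool {
	return len(zfsVersion) >= 3 && zfsVersion[0:3] != "0.6"
}

func main() {
	fmt.Println(zfsSupportsCompressedSend("0.6.5")) // false
	fmt.Println(zfsSupportsCompressedSend("0.8.3")) // true
	fmt.Println(zfsSupportsCompressedSend(""))      // false: unknown version
}
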
diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index 52a5dde446..ad2c5d15e1 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -79,12 +79,6 @@ func (s *migrationSourceWs) DoStorage(state *state.State, poolName string, volNa
 			},
 		}
 
-		if len(zfsVersion) >= 3 && zfsVersion[0:3] != "0.6" {
-			offerHeader.ZfsFeatures = &migration.ZfsFeatures{
-				Compress: &hasFeature,
-			}
-		}
-
 		// Storage needs to start unconditionally now, since we need to initialize a new
 		// storage interface.
 		ourMount, err := s.storage.StoragePoolVolumeMount()
@@ -406,12 +400,6 @@ func (c *migrationSink) DoStorage(state *state.State, poolName string, req *api.
 			},
 		}
 
-		if len(zfsVersion) >= 3 && zfsVersion[0:3] != "0.6" {
-			respHeader.ZfsFeatures = &migration.ZfsFeatures{
-				Compress: &hasFeature,
-			}
-		}
-
 		// If the storage type the source has doesn't match what we have, then we have to
 		// use rsync.
 		if *offerHeader.Fs != *respHeader.Fs {
diff --git a/lxd/patches_utils.go b/lxd/patches_utils.go
index 217d684973..17775e6105 100644
--- a/lxd/patches_utils.go
+++ b/lxd/patches_utils.go
@@ -8,13 +8,14 @@ import (
 	"sort"
 	"strings"
 
+	"github.com/pborman/uuid"
+	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/lxd/state"
 	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/logger"
 )
 
 // For 'dir' storage backend.
@@ -64,7 +65,6 @@ func btrfsSubVolumeCreate(subvol string) error {
 		"create",
 		subvol)
 	if err != nil {
-		logger.Errorf("Failed to create BTRFS subvolume \"%s\": %v", subvol, err)
 		return err
 	}
 
@@ -282,3 +282,184 @@ func btrfsSubVolumesGet(path string) ([]string, error) {
 
 	return result, nil
 }
+
+// For 'zfs' storage backend.
+func zfsPoolListSnapshots(pool string, path string) ([]string, error) {
+	path = strings.TrimRight(path, "/")
+	fullPath := pool
+	if path != "" {
+		fullPath = fmt.Sprintf("%s/%s", pool, path)
+	}
+
+	output, err := shared.RunCommand("zfs", "list", "-t", "snapshot", "-o", "name", "-H", "-d", "1", "-s", "creation", "-r", fullPath)
+	if err != nil {
+		return []string{}, errors.Wrap(err, "Failed to list ZFS snapshots")
+	}
+
+	children := []string{}
+	for _, entry := range strings.Split(output, "\n") {
+		if entry == "" {
+			continue
+		}
+
+		if entry == fullPath {
+			continue
+		}
+
+		children = append(children, strings.SplitN(entry, "@", 2)[1])
+	}
+
+	return children, nil
+}
+
+func zfsSnapshotDeleteInternal(projectName, poolName string, ctName string, onDiskPoolName string) error {
+	sourceContainerName, sourceContainerSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(ctName)
+	snapName := fmt.Sprintf("snapshot-%s", sourceContainerSnapOnlyName)
+
+	if zfsFilesystemEntityExists(onDiskPoolName,
+		fmt.Sprintf("containers/%s@%s",
+			project.Prefix(projectName, sourceContainerName), snapName)) {
+		removable, err := zfsPoolVolumeSnapshotRemovable(onDiskPoolName,
+			fmt.Sprintf("containers/%s",
+				project.Prefix(projectName, sourceContainerName)),
+			snapName)
+		if err != nil {
+			return err
+		}
+
+		if removable {
+			err = zfsPoolVolumeSnapshotDestroy(onDiskPoolName,
+				fmt.Sprintf("containers/%s",
+					project.Prefix(projectName, sourceContainerName)),
+				snapName)
+		} else {
+			err = zfsPoolVolumeSnapshotRename(onDiskPoolName,
+				fmt.Sprintf("containers/%s",
+					project.Prefix(projectName, sourceContainerName)),
+				snapName,
+				fmt.Sprintf("copy-%s", uuid.NewRandom().String()))
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	// Delete the snapshot on its storage pool:
+	// ${POOL}/snapshots/<snapshot_name>
+	snapshotContainerMntPoint := driver.GetSnapshotMountPoint(projectName, poolName, ctName)
+	if shared.PathExists(snapshotContainerMntPoint) {
+		err := os.RemoveAll(snapshotContainerMntPoint)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Check if we can remove the snapshot symlink:
+	// ${LXD_DIR}/snapshots/<container_name> to ${POOL}/snapshots/<container_name>
+	// by checking if the directory is empty.
+	snapshotContainerPath := driver.GetSnapshotMountPoint(projectName, poolName, sourceContainerName)
+	empty, _ := shared.PathIsEmpty(snapshotContainerPath)
+	if empty {
+		// Remove the snapshot directory for the container:
+		// ${POOL}/snapshots/<source_container_name>
+		err := os.Remove(snapshotContainerPath)
+		if err != nil {
+			return err
+		}
+
+		snapshotSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceContainerName))
+		if shared.PathExists(snapshotSymlink) {
+			err := os.Remove(snapshotSymlink)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Legacy
+	snapPath := shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", project.Prefix(projectName, sourceContainerName), sourceContainerSnapOnlyName))
+	if shared.PathExists(snapPath) {
+		err := os.Remove(snapPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Legacy
+	parent := shared.VarPath(fmt.Sprintf("snapshots/%s", project.Prefix(projectName, sourceContainerName)))
+	if ok, _ := shared.PathIsEmpty(parent); ok {
+		err := os.Remove(parent)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func zfsFilesystemEntityExists(pool string, path string) bool {
+	vdev := pool
+	if path != "" {
+		vdev = fmt.Sprintf("%s/%s", pool, path)
+	}
+
+	output, err := shared.RunCommand("zfs", "get", "-H", "-o", "name", "type", vdev)
+	if err != nil {
+		return false
+	}
+
+	detectedName := strings.TrimSpace(output)
+	return detectedName == vdev
+}
+
+func zfsPoolVolumeSnapshotRemovable(pool string, path string, name string) (bool, error) {
+	var snap string
+	if name == "" {
+		snap = path
+	} else {
+		snap = fmt.Sprintf("%s@%s", path, name)
+	}
+
+	clones, err := zfsFilesystemEntityPropertyGet(pool, snap, "clones")
+	if err != nil {
+		return false, err
+	}
+
+	if clones == "-" || clones == "" {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func zfsFilesystemEntityPropertyGet(pool string, path string, key string) (string, error) {
+	entity := pool
+	if path != "" {
+		entity = fmt.Sprintf("%s/%s", pool, path)
+	}
+
+	output, err := shared.RunCommand("zfs", "get", "-H", "-p", "-o", "value", key, entity)
+	if err != nil {
+		return "", errors.Wrap(err, "Failed to get ZFS config")
+	}
+
+	return strings.TrimRight(output, "\n"), nil
+}
+
+func zfsPoolVolumeSnapshotDestroy(pool, path string, name string) error {
+	_, err := shared.RunCommand("zfs", "destroy", "-r", fmt.Sprintf("%s/%s@%s", pool, path, name))
+	if err != nil {
+		return errors.Wrap(err, "Failed to destroy ZFS snapshot")
+	}
+
+	return nil
+}
+
+func zfsPoolVolumeSnapshotRename(pool string, path string, oldName string, newName string) error {
+	_, err := shared.RunCommand("zfs", "rename", "-r", fmt.Sprintf("%s/%s@%s", pool, path, oldName), fmt.Sprintf("%s/%s@%s", pool, path, newName))
+	if err != nil {
+		return errors.Wrap(err, "Failed to rename ZFS snapshot")
+	}
+
+	return nil
+}
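
To show how the helpers added above compose, here is a sketch of the destroy-or-rename policy that zfsSnapshotDeleteInternal applies: a snapshot whose "clones" property is empty can be destroyed outright, while a cloned one is renamed out of the way instead. It assumes the helpers and imports introduced in this file; pool and dataset names are examples:

// retireSnapshot removes snap when nothing references it, otherwise
// parks it under a copy-<uuid> name so its clones stay valid.
func retireSnapshot(pool string, dataset string, snap string) error {
	removable, err := zfsPoolVolumeSnapshotRemovable(pool, dataset, snap)
	if err != nil {
		return err
	}

	if removable {
		return zfsPoolVolumeSnapshotDestroy(pool, dataset, snap)
	}

	return zfsPoolVolumeSnapshotRename(pool, dataset, snap, fmt.Sprintf("copy-%s", uuid.NewRandom().String()))
}
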
diff --git a/lxd/storage.go b/lxd/storage.go
index b58034a345..61d2d6ed2f 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -99,7 +99,6 @@ const (
 	storageTypeCeph storageType = iota
 	storageTypeLvm
 	storageTypeMock
-	storageTypeZfs
 )
 
 var supportedStoragePoolDrivers = []string{"btrfs", "ceph", "cephfs", "dir", "lvm", "zfs"}
@@ -112,8 +111,6 @@ func storageTypeToString(sType storageType) (string, error) {
 		return "lvm", nil
 	case storageTypeMock:
 		return "mock", nil
-	case storageTypeZfs:
-		return "zfs", nil
 	}
 
 	return "", fmt.Errorf("Invalid storage type")
@@ -127,8 +124,6 @@ func storageStringToType(sName string) (storageType, error) {
 		return storageTypeLvm, nil
 	case "mock":
 		return storageTypeMock, nil
-	case "zfs":
-		return storageTypeZfs, nil
 	}
 
 	return -1, fmt.Errorf("Invalid storage type name")
@@ -268,13 +263,6 @@ func storageCoreInit(driver string) (storage, error) {
 			return nil, err
 		}
 		return &mock, nil
-	case storageTypeZfs:
-		zfs := storageZfs{}
-		err = zfs.StorageCoreInit()
-		if err != nil {
-			return nil, err
-		}
-		return &zfs, nil
 	}
 
 	return nil, fmt.Errorf("invalid storage type")
@@ -342,17 +330,6 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 			return nil, err
 		}
 		return &mock, nil
-	case storageTypeZfs:
-		zfs := storageZfs{}
-		zfs.poolID = poolID
-		zfs.pool = pool
-		zfs.volume = volume
-		zfs.s = s
-		err = zfs.StoragePoolInit()
-		if err != nil {
-			return nil, err
-		}
-		return &zfs, nil
 	}
 
 	return nil, fmt.Errorf("invalid storage type")
@@ -468,7 +445,7 @@ func storagePoolVolumeAttachPrepare(s *state.State, poolName string, volumeName
 			var err error
 
 			if pool.Driver == "zfs" {
-				err = lastIdmap.UnshiftRootfs(remapPath, zfsIdmapSetSkipper)
+				err = lastIdmap.UnshiftRootfs(remapPath, shiftZfsSkipper)
 			} else {
 				err = lastIdmap.UnshiftRootfs(remapPath, nil)
 			}
@@ -486,7 +463,7 @@ func storagePoolVolumeAttachPrepare(s *state.State, poolName string, volumeName
 			var err error
 
 			if pool.Driver == "zfs" {
-				err = nextIdmap.ShiftRootfs(remapPath, zfsIdmapSetSkipper)
+				err = nextIdmap.ShiftRootfs(remapPath, shiftZfsSkipper)
 			} else {
 				err = nextIdmap.ShiftRootfs(remapPath, nil)
 			}
diff --git a/lxd/storage_migration_zfs.go b/lxd/storage_migration_zfs.go
deleted file mode 100644
index 8bbb56a511..0000000000
--- a/lxd/storage_migration_zfs.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os/exec"
-
-	"github.com/gorilla/websocket"
-	"github.com/pborman/uuid"
-
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/logger"
-)
-
-type zfsMigrationSourceDriver struct {
-	instance         instance.Instance
-	snapshots        []instance.Instance
-	zfsSnapshotNames []string
-	zfs              *storageZfs
-	runningSnapName  string
-	stoppedSnapName  string
-	zfsFeatures      []string
-}
-
-func (s *zfsMigrationSourceDriver) send(conn *websocket.Conn, zfsName string, zfsParent string, readWrapper func(io.ReadCloser) io.ReadCloser) error {
-	sourceParentName, _, _ := shared.InstanceGetParentAndSnapshotName(s.instance.Name())
-	poolName := s.zfs.getOnDiskPoolName()
-	args := []string{"send"}
-
-	// Negotiated options
-	if s.zfsFeatures != nil && len(s.zfsFeatures) > 0 {
-		if shared.StringInSlice("compress", s.zfsFeatures) {
-			args = append(args, "-c")
-			args = append(args, "-L")
-		}
-	}
-
-	args = append(args, []string{fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(s.instance.Project(), sourceParentName), zfsName)}...)
-	if zfsParent != "" {
-		args = append(args, "-i", fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(s.instance.Project(), s.instance.Name()), zfsParent))
-	}
-
-	cmd := exec.Command("zfs", args...)
-
-	stdout, err := cmd.StdoutPipe()
-	if err != nil {
-		return err
-	}
-
-	readPipe := io.ReadCloser(stdout)
-	if readWrapper != nil {
-		readPipe = readWrapper(stdout)
-	}
-
-	stderr, err := cmd.StderrPipe()
-	if err != nil {
-		return err
-	}
-
-	if err := cmd.Start(); err != nil {
-		return err
-	}
-
-	<-shared.WebsocketSendStream(conn, readPipe, 4*1024*1024)
-
-	output, err := ioutil.ReadAll(stderr)
-	if err != nil {
-		logger.Errorf("Problem reading zfs send stderr: %s", err)
-	}
-
-	err = cmd.Wait()
-	if err != nil {
-		logger.Errorf("Problem with zfs send: %s", string(output))
-	}
-
-	return err
-}
-
-func (s *zfsMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operations.Operation, bwlimit string, containerOnly bool) error {
-	if s.instance.IsSnapshot() {
-		_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(s.instance.Name())
-		snapshotName := fmt.Sprintf("snapshot-%s", snapOnlyName)
-		wrapper := migration.ProgressReader(op, "fs_progress", s.instance.Name())
-		return s.send(conn, snapshotName, "", wrapper)
-	}
-
-	lastSnap := ""
-	if !containerOnly {
-		for i, snap := range s.zfsSnapshotNames {
-			prev := ""
-			if i > 0 {
-				prev = s.zfsSnapshotNames[i-1]
-			}
-
-			lastSnap = snap
-
-			wrapper := migration.ProgressReader(op, "fs_progress", snap)
-			if err := s.send(conn, snap, prev, wrapper); err != nil {
-				return err
-			}
-		}
-	}
-
-	s.runningSnapName = fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())
-	if err := zfsPoolVolumeSnapshotCreate(s.zfs.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(s.instance.Project(), s.instance.Name())), s.runningSnapName); err != nil {
-		return err
-	}
-
-	wrapper := migration.ProgressReader(op, "fs_progress", s.instance.Name())
-	if err := s.send(conn, s.runningSnapName, lastSnap, wrapper); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *zfsMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn, bwlimit string) error {
-	s.stoppedSnapName = fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())
-	if err := zfsPoolVolumeSnapshotCreate(s.zfs.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(s.instance.Project(), s.instance.Name())), s.stoppedSnapName); err != nil {
-		return err
-	}
-
-	if err := s.send(conn, s.stoppedSnapName, s.runningSnapName, nil); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *zfsMigrationSourceDriver) Cleanup() {
-	poolName := s.zfs.getOnDiskPoolName()
-	if s.stoppedSnapName != "" {
-		zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(s.instance.Project(), s.instance.Name())), s.stoppedSnapName)
-	}
-	if s.runningSnapName != "" {
-		zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(s.instance.Project(), s.instance.Name())), s.runningSnapName)
-	}
-}
-
-func (s *zfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operations.Operation, bwlimit string, storage storage, volumeOnly bool) error {
-	msg := "Function not implemented"
-	logger.Errorf(msg)
-	return fmt.Errorf(msg)
-}
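
For reference, the send path of the driver deleted above reduces to streaming "zfs send" with optional -c/-L (when the compress feature was negotiated) and -i (for an incremental stream against a parent snapshot). A minimal sketch, assuming only the zfs CLI and illustrative dataset names:

package main

import (
	"io"
	"os"
	"os/exec"
)

// zfsSendSnapshot streams "zfs send [-c -L] [-i parent] dataset@snap"
// into w, mirroring the shape of the deleted driver's send method.
func zfsSendSnapshot(w io.Writer, snapshot string, parent string, compress bool) error {
	args := []string{"send"}
	if compress {
		args = append(args, "-c", "-L")
	}
	if parent != "" {
		args = append(args, "-i", parent)
	}
	args = append(args, snapshot)

	cmd := exec.Command("zfs", args...)
	cmd.Stdout = w
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	// Hypothetical dataset names in the legacy layout.
	zfsSendSnapshot(os.Stdout, "lxd/containers/c1@snapshot-snap1", "lxd/containers/c1@snapshot-snap0", false)
}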
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
deleted file mode 100644
index 3231dc85c3..0000000000
--- a/lxd/storage_zfs.go
+++ /dev/null
@@ -1,3342 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"github.com/gorilla/websocket"
-	"github.com/pkg/errors"
-	"golang.org/x/sys/unix"
-
-	"github.com/lxc/lxd/lxd/backup"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/instance/instancetype"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/lxd/rsync"
-	driver "github.com/lxc/lxd/lxd/storage"
-	"github.com/lxc/lxd/lxd/util"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/ioprogress"
-	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/units"
-
-	"github.com/pborman/uuid"
-)
-
-// Global defaults
-var zfsUseRefquota = "false"
-var zfsRemoveSnapshots = "false"
-
-// Cache
-var zfsVersion = ""
-
-type storageZfs struct {
-	dataset string
-	storageShared
-}
-
-func (s *storageZfs) getOnDiskPoolName() string {
-	if s.dataset != "" {
-		return s.dataset
-	}
-
-	return s.pool.Name
-}
-
-// Only initialize the minimal information we need about a given storage type.
-func (s *storageZfs) StorageCoreInit() error {
-	s.sType = storageTypeZfs
-	typeName, err := storageTypeToString(s.sType)
-	if err != nil {
-		return err
-	}
-	s.sTypeName = typeName
-
-	if zfsVersion != "" {
-		s.sTypeVersion = zfsVersion
-		return nil
-	}
-
-	util.LoadModule("zfs")
-
-	if !zfsIsEnabled() {
-		return fmt.Errorf("The \"zfs\" tool is not enabled")
-	}
-
-	s.sTypeVersion, err = zfsToolVersionGet()
-	if err != nil {
-		s.sTypeVersion, err = zfsModuleVersionGet()
-		if err != nil {
-			return err
-		}
-	}
-
-	zfsVersion = s.sTypeVersion
-
-	return nil
-}
-
-// Functions dealing with storage pools.
-func (s *storageZfs) StoragePoolInit() error {
-	err := s.StorageCoreInit()
-	if err != nil {
-		return err
-	}
-
-	// Detect whether we have been given a zfs dataset as source.
-	if s.pool.Config["zfs.pool_name"] != "" {
-		s.dataset = s.pool.Config["zfs.pool_name"]
-	}
-
-	return nil
-}
-
-func (s *storageZfs) StoragePoolCheck() error {
-	logger.Debugf("Checking ZFS storage pool \"%s\"", s.pool.Name)
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	poolName := s.getOnDiskPoolName()
-	purePoolName := strings.Split(poolName, "/")[0]
-	exists := zfsFilesystemEntityExists(purePoolName, "")
-	if exists {
-		return nil
-	}
-
-	logger.Debugf("ZFS storage pool \"%s\" does not exist, trying to import it", poolName)
-
-	var err error
-	if filepath.IsAbs(source) {
-		disksPath := shared.VarPath("disks")
-		_, err = shared.RunCommand("zpool", "import", "-f", "-d", disksPath, poolName)
-	} else {
-		_, err = shared.RunCommand("zpool", "import", purePoolName)
-	}
-
-	if err != nil {
-		return fmt.Errorf("ZFS storage pool \"%s\" could not be imported: %s", poolName, err)
-	}
-
-	logger.Debugf("ZFS storage pool \"%s\" successfully imported", poolName)
-	return nil
-}
-
-func (s *storageZfs) StoragePoolCreate() error {
-	logger.Infof("Creating ZFS storage pool \"%s\"", s.pool.Name)
-
-	err := s.zfsPoolCreate()
-	if err != nil {
-		return err
-	}
-	revert := true
-	defer func() {
-		if !revert {
-			return
-		}
-		s.StoragePoolDelete()
-	}()
-
-	storagePoolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-	err = os.MkdirAll(storagePoolMntPoint, 0711)
-	if err != nil {
-		return err
-	}
-
-	err = s.StoragePoolCheck()
-	if err != nil {
-		return err
-	}
-
-	revert = false
-
-	logger.Infof("Created ZFS storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) zfsPoolCreate() error {
-	s.pool.Config["volatile.initial_source"] = s.pool.Config["source"]
-
-	zpoolName := s.getOnDiskPoolName()
-	vdev := s.pool.Config["source"]
-	defaultVdev := filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", s.pool.Name))
-	if vdev == "" || vdev == defaultVdev {
-		vdev = defaultVdev
-		s.pool.Config["source"] = vdev
-
-		if s.pool.Config["zfs.pool_name"] == "" {
-			s.pool.Config["zfs.pool_name"] = zpoolName
-		}
-
-		f, err := os.Create(vdev)
-		if err != nil {
-			return fmt.Errorf("Failed to open %s: %s", vdev, err)
-		}
-		defer f.Close()
-
-		err = f.Chmod(0600)
-		if err != nil {
-			return fmt.Errorf("Failed to chmod %s: %s", vdev, err)
-		}
-
-		size, err := units.ParseByteSizeString(s.pool.Config["size"])
-		if err != nil {
-			return err
-		}
-		err = f.Truncate(size)
-		if err != nil {
-			return fmt.Errorf("Failed to create sparse file %s: %s", vdev, err)
-		}
-
-		err = zfsPoolCreate(zpoolName, vdev)
-		if err != nil {
-			return err
-		}
-	} else {
-		// Unset size property since it doesn't make sense.
-		s.pool.Config["size"] = ""
-
-		if filepath.IsAbs(vdev) {
-			if !shared.IsBlockdevPath(vdev) {
-				return fmt.Errorf("Custom loop file locations are not supported")
-			}
-
-			if s.pool.Config["zfs.pool_name"] == "" {
-				s.pool.Config["zfs.pool_name"] = zpoolName
-			}
-
-			// This is a block device. Note, that we do not store the
-			// block device path or UUID or PARTUUID or similar in
-			// the database. All of those might change or might be
-			// used in a special way (For example, zfs uses a single
-			// UUID in a multi-device pool for all devices.). The
-			// safest way is to just store the name of the zfs pool
-			// we create.
-			s.pool.Config["source"] = zpoolName
-			err := zfsPoolCreate(zpoolName, vdev)
-			if err != nil {
-				return err
-			}
-		} else {
-			if s.pool.Config["zfs.pool_name"] != "" && s.pool.Config["zfs.pool_name"] != vdev {
-				return fmt.Errorf("Invalid combination of \"source\" and \"zfs.pool_name\" property")
-			}
-
-			s.pool.Config["zfs.pool_name"] = vdev
-			s.dataset = vdev
-
-			if strings.Contains(vdev, "/") {
-				if !zfsFilesystemEntityExists(vdev, "") {
-					err := zfsPoolCreate("", vdev)
-					if err != nil {
-						return err
-					}
-				}
-			} else {
-				err := zfsPoolCheck(vdev)
-				if err != nil {
-					return err
-				}
-			}
-
-			subvols, err := zfsPoolListSubvolumes(zpoolName, vdev)
-			if err != nil {
-				return err
-			}
-
-			if len(subvols) > 0 {
-				return fmt.Errorf("Provided ZFS pool (or dataset) isn't empty")
-			}
-
-			err = zfsPoolApplyDefaults(vdev)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Create default dummy datasets to avoid zfs races during container
-	// creation.
-	poolName := s.getOnDiskPoolName()
-	dataset := fmt.Sprintf("%s/containers", poolName)
-	msg, err := zfsPoolVolumeCreate(dataset, "mountpoint=none")
-	if err != nil {
-		logger.Errorf("Failed to create containers dataset: %s", msg)
-		return err
-	}
-
-	fixperms := shared.VarPath("storage-pools", s.pool.Name, "containers")
-	err = os.MkdirAll(fixperms, driver.ContainersDirMode)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	err = os.Chmod(fixperms, driver.ContainersDirMode)
-	if err != nil {
-		logger.Warnf("Failed to chmod \"%s\" to \"0%s\": %s", fixperms, strconv.FormatInt(int64(driver.ContainersDirMode), 8), err)
-	}
-
-	dataset = fmt.Sprintf("%s/images", poolName)
-	msg, err = zfsPoolVolumeCreate(dataset, "mountpoint=none")
-	if err != nil {
-		logger.Errorf("Failed to create images dataset: %s", msg)
-		return err
-	}
-
-	fixperms = shared.VarPath("storage-pools", s.pool.Name, "images")
-	err = os.MkdirAll(fixperms, driver.ImagesDirMode)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	err = os.Chmod(fixperms, driver.ImagesDirMode)
-	if err != nil {
-		logger.Warnf("Failed to chmod \"%s\" to \"0%s\": %s", fixperms, strconv.FormatInt(int64(driver.ImagesDirMode), 8), err)
-	}
-
-	dataset = fmt.Sprintf("%s/custom", poolName)
-	msg, err = zfsPoolVolumeCreate(dataset, "mountpoint=none")
-	if err != nil {
-		logger.Errorf("Failed to create custom dataset: %s", msg)
-		return err
-	}
-
-	fixperms = shared.VarPath("storage-pools", s.pool.Name, "custom")
-	err = os.MkdirAll(fixperms, driver.CustomDirMode)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	err = os.Chmod(fixperms, driver.CustomDirMode)
-	if err != nil {
-		logger.Warnf("Failed to chmod \"%s\" to \"0%s\": %s", fixperms, strconv.FormatInt(int64(driver.CustomDirMode), 8), err)
-	}
-
-	dataset = fmt.Sprintf("%s/deleted", poolName)
-	msg, err = zfsPoolVolumeCreate(dataset, "mountpoint=none")
-	if err != nil {
-		logger.Errorf("Failed to create deleted dataset: %s", msg)
-		return err
-	}
-
-	dataset = fmt.Sprintf("%s/snapshots", poolName)
-	msg, err = zfsPoolVolumeCreate(dataset, "mountpoint=none")
-	if err != nil {
-		logger.Errorf("Failed to create snapshots dataset: %s", msg)
-		return err
-	}
-
-	fixperms = shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots")
-	err = os.MkdirAll(fixperms, driver.SnapshotsDirMode)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	err = os.Chmod(fixperms, driver.SnapshotsDirMode)
-	if err != nil {
-		logger.Warnf("Failed to chmod \"%s\" to \"0%s\": %s", fixperms, strconv.FormatInt(int64(driver.SnapshotsDirMode), 8), err)
-	}
-
-	dataset = fmt.Sprintf("%s/custom-snapshots", poolName)
-	msg, err = zfsPoolVolumeCreate(dataset, "mountpoint=none")
-	if err != nil {
-		logger.Errorf("Failed to create snapshots dataset: %s", msg)
-		return err
-	}
-
-	fixperms = shared.VarPath("storage-pools", s.pool.Name, "custom-snapshots")
-	err = os.MkdirAll(fixperms, driver.SnapshotsDirMode)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	err = os.Chmod(fixperms, driver.SnapshotsDirMode)
-	if err != nil {
-		logger.Warnf("Failed to chmod \"%s\" to \"0%s\": %s", fixperms, strconv.FormatInt(int64(driver.SnapshotsDirMode), 8), err)
-	}
-
-	return nil
-}
-
-func (s *storageZfs) StoragePoolDelete() error {
-	logger.Infof("Deleting ZFS storage pool \"%s\"", s.pool.Name)
-
-	poolName := s.getOnDiskPoolName()
-	if zfsFilesystemEntityExists(poolName, "") {
-		err := zfsFilesystemEntityDelete(s.pool.Config["source"], poolName)
-		if err != nil {
-			return err
-		}
-	}
-
-	storagePoolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-	if shared.PathExists(storagePoolMntPoint) {
-		err := os.RemoveAll(storagePoolMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Infof("Deleted ZFS storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) StoragePoolMount() (bool, error) {
-	return true, nil
-}
-
-func (s *storageZfs) StoragePoolUmount() (bool, error) {
-	return true, nil
-}
-
-func (s *storageZfs) StoragePoolVolumeCreate() error {
-	logger.Infof("Creating ZFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	isSnapshot := shared.IsSnapshot(s.volume.Name)
-
-	var fs string
-
-	if isSnapshot {
-		fs = fmt.Sprintf("custom-snapshots/%s", s.volume.Name)
-	} else {
-		fs = fmt.Sprintf("custom/%s", s.volume.Name)
-	}
-	poolName := s.getOnDiskPoolName()
-	dataset := fmt.Sprintf("%s/%s", poolName, fs)
-
-	var customPoolVolumeMntPoint string
-
-	if isSnapshot {
-		customPoolVolumeMntPoint = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	} else {
-		customPoolVolumeMntPoint = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	}
-
-	msg, err := zfsPoolVolumeCreate(dataset, "mountpoint=none", "canmount=noauto")
-	if err != nil {
-		logger.Errorf("Failed to create ZFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, msg)
-		return err
-	}
-	revert := true
-	defer func() {
-		if !revert {
-			return
-		}
-		s.StoragePoolVolumeDelete()
-	}()
-
-	err = zfsPoolVolumeSet(poolName, fs, "mountpoint", customPoolVolumeMntPoint)
-	if err != nil {
-		return err
-	}
-
-	if !shared.IsMountPoint(customPoolVolumeMntPoint) {
-		err := zfsMount(poolName, fs)
-		if err != nil {
-			return err
-		}
-		defer zfsUmount(poolName, fs, customPoolVolumeMntPoint)
-	}
-
-	// apply quota
-	if s.volume.Config["size"] != "" {
-		size, err := units.ParseByteSizeString(s.volume.Config["size"])
-		if err != nil {
-			return err
-		}
-
-		err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-		if err != nil {
-			return err
-		}
-	}
-
-	revert = false
-
-	logger.Infof("Created ZFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) StoragePoolVolumeDelete() error {
-	logger.Infof("Deleting ZFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	fs := fmt.Sprintf("custom/%s", s.volume.Name)
-	customPoolVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-	poolName := s.getOnDiskPoolName()
-	if zfsFilesystemEntityExists(poolName, fs) {
-		removable := true
-		snaps, err := zfsPoolListSnapshots(poolName, fs)
-		if err != nil {
-			return err
-		}
-
-		for _, snap := range snaps {
-			var err error
-			removable, err = zfsPoolVolumeSnapshotRemovable(poolName, fs, snap)
-			if err != nil {
-				return err
-			}
-
-			if !removable {
-				break
-			}
-		}
-
-		if removable {
-			origin, err := zfsFilesystemEntityPropertyGet(poolName, fs, "origin")
-			if err != nil {
-				return err
-			}
-			poolName := s.getOnDiskPoolName()
-			origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", poolName))
-
-			err = zfsPoolVolumeDestroy(poolName, fs)
-			if err != nil {
-				return err
-			}
-
-			err = zfsPoolVolumeCleanup(poolName, origin)
-			if err != nil {
-				return err
-			}
-		} else {
-			err := zfsPoolVolumeSet(poolName, fs, "mountpoint", "none")
-			if err != nil {
-				return err
-			}
-
-			err = zfsPoolVolumeRename(poolName, fs, fmt.Sprintf("deleted/custom/%s", uuid.NewRandom().String()), true)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	if shared.PathExists(customPoolVolumeMntPoint) {
-		err := os.RemoveAll(customPoolVolumeMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	err := s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		s.volume.Name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for ZFS storage volume "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-	}
-
-	logger.Infof("Deleted ZFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) StoragePoolVolumeMount() (bool, error) {
-	logger.Debugf("Mounting ZFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	fs := fmt.Sprintf("custom/%s", s.volume.Name)
-	customPoolVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-	customMountLockID := getCustomMountLockID(s.pool.Name, s.volume.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in mounting the storage volume.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[customMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var customerr error
-	ourMount := false
-	if !shared.IsMountPoint(customPoolVolumeMntPoint) {
-		customerr = zfsMount(s.getOnDiskPoolName(), fs)
-		ourMount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, customMountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if customerr != nil {
-		return false, customerr
-	}
-
-	logger.Debugf("Mounted ZFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourMount, nil
-}
-
-func (s *storageZfs) StoragePoolVolumeUmount() (bool, error) {
-	logger.Debugf("Unmounting ZFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	fs := fmt.Sprintf("custom/%s", s.volume.Name)
-	customPoolVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-	customUmountLockID := getCustomUmountLockID(s.pool.Name, s.volume.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customUmountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in unmounting the storage volume.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[customUmountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var customerr error
-	ourUmount := false
-	if shared.IsMountPoint(customPoolVolumeMntPoint) {
-		customerr = zfsUmount(s.getOnDiskPoolName(), fs, customPoolVolumeMntPoint)
-		ourUmount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customUmountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, customUmountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if customerr != nil {
-		return false, customerr
-	}
-
-	logger.Debugf("Unmounted ZFS storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourUmount, nil
-}
-
-func (s *storageZfs) GetContainerPoolInfo() (int64, string, string) {
-	return s.poolID, s.pool.Name, s.getOnDiskPoolName()
-}
-
-func (s *storageZfs) StoragePoolUpdate(writable *api.StoragePoolPut, changedConfig []string) error {
-	logger.Infof(`Updating ZFS storage pool "%s"`, s.pool.Name)
-
-	changeable := changeableStoragePoolProperties["zfs"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolError(unchangeable, "zfs")
-	}
-
-	// "rsync.bwlimit" requires no on-disk modifications.
-	// "volume.zfs.remove_snapshots" requires no on-disk modifications.
-	// "volume.zfs.use_refquota" requires no on-disk modifications.
-
-	logger.Infof(`Updated ZFS storage pool "%s"`, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) StoragePoolVolumeUpdate(writable *api.StorageVolumePut, changedConfig []string) error {
-	if writable.Restore != "" {
-		logger.Infof(`Restoring ZFS storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-
-		// Check that we can remove the snapshot
-		poolID, err := s.s.Cluster.StoragePoolGetID(s.pool.Name)
-		if err != nil {
-			return err
-		}
-
-		// Get the names of all storage volume snapshots of a given volume
-		volumes, err := s.s.Cluster.StoragePoolVolumeSnapshotsGetType(s.volume.Name, storagePoolVolumeTypeCustom, poolID)
-		if err != nil {
-			return err
-		}
-
-		if volumes[len(volumes)-1].Name != fmt.Sprintf("%s/%s", s.volume.Name, writable.Restore) {
-			return fmt.Errorf("ZFS can only restore from the latest snapshot. Delete newer snapshots or copy the snapshot into a new volume instead")
-		}
-
-		s.volume.Description = writable.Description
-		s.volume.Config = writable.Config
-
-		targetSnapshotDataset := fmt.Sprintf("%s/custom/%s@snapshot-%s", s.getOnDiskPoolName(), s.volume.Name, writable.Restore)
-		msg, err := shared.RunCommand("zfs", "rollback", "-r", "-R", targetSnapshotDataset)
-		if err != nil {
-			logger.Errorf("Failed to rollback ZFS dataset: %s", msg)
-			return err
-		}
-
-		logger.Infof(`Restored ZFS storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-		return nil
-	}
-
-	logger.Infof(`Updating ZFS storage volume "%s"`, s.volume.Name)
-
-	changeable := changeableStoragePoolVolumeProperties["zfs"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolVolumeError(unchangeable, "zfs")
-	}
-
-	if shared.StringInSlice("size", changedConfig) {
-		if s.volume.Type != storagePoolVolumeTypeNameCustom {
-			return updateStoragePoolVolumeError([]string{"size"}, "zfs")
-		}
-
-		if s.volume.Config["size"] != writable.Config["size"] {
-			size, err := units.ParseByteSizeString(writable.Config["size"])
-			if err != nil {
-				return err
-			}
-
-			err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	logger.Infof(`Updated ZFS storage volume "%s"`, s.volume.Name)
-	return nil
-}
-
-func (s *storageZfs) StoragePoolVolumeRename(newName string) error {
-	logger.Infof(`Renaming ZFS storage volume on storage pool "%s" from "%s" to "%s"`,
-		s.pool.Name, s.volume.Name, newName)
-
-	usedBy, err := storagePoolVolumeUsedByInstancesGet(s.s, "default", s.pool.Name, s.volume.Name)
-	if err != nil {
-		return err
-	}
-	if len(usedBy) > 0 {
-		return fmt.Errorf(`ZFS storage volume "%s" on storage pool "%s" is attached to containers`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	isSnapshot := shared.IsSnapshot(s.volume.Name)
-
-	var oldPath string
-	var newPath string
-
-	if isSnapshot {
-		oldPath = fmt.Sprintf("custom-snapshots/%s", s.volume.Name)
-		newPath = fmt.Sprintf("custom-snapshots/%s", newName)
-	} else {
-		oldPath = fmt.Sprintf("custom/%s", s.volume.Name)
-		newPath = fmt.Sprintf("custom/%s", newName)
-	}
-	poolName := s.getOnDiskPoolName()
-	err = zfsPoolVolumeRename(poolName, oldPath, newPath, false)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof(`Renamed ZFS storage volume on storage pool "%s" from "%s" to "%s"`,
-		s.pool.Name, s.volume.Name, newName)
-
-	return s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, newName,
-		storagePoolVolumeTypeCustom, s.poolID)
-}
-
-// Things we don't need to care about
-func (s *storageZfs) ContainerMount(c instance.Instance) (bool, error) {
-	return s.doContainerMount(c.Project(), c.Name(), c.IsPrivileged())
-}
-
-func (s *storageZfs) ContainerUmount(c instance.Instance, path string) (bool, error) {
-	logger.Debugf("Unmounting ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	name := c.Name()
-
-	fs := fmt.Sprintf("containers/%s", project.Prefix(c.Project(), name))
-	containerPoolVolumeMntPoint := driver.GetContainerMountPoint(c.Project(), s.pool.Name, name)
-
-	containerUmountLockID := getContainerUmountLockID(s.pool.Name, name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerUmountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in unmounting the storage volume.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[containerUmountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var imgerr error
-	ourUmount := false
-	if shared.IsMountPoint(containerPoolVolumeMntPoint) {
-		imgerr = zfsUmount(s.getOnDiskPoolName(), fs, containerPoolVolumeMntPoint)
-		ourUmount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerUmountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, containerUmountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if imgerr != nil {
-		return false, imgerr
-	}
-
-	logger.Debugf("Unmounted ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourUmount, nil
-}
-
-// Things we do have to care about
-func (s *storageZfs) ContainerStorageReady(container instance.Instance) bool {
-	volumeName := project.Prefix(container.Project(), container.Name())
-	fs := fmt.Sprintf("containers/%s", volumeName)
-	return zfsFilesystemEntityExists(s.getOnDiskPoolName(), fs)
-}
-
-func (s *storageZfs) ContainerCreate(container instance.Instance) error {
-	err := s.doContainerCreate(container.Project(), container.Name(), container.IsPrivileged())
-	if err != nil {
-		s.doContainerDelete(container.Project(), container.Name())
-		return err
-	}
-
-	ourMount, err := s.ContainerMount(container)
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer s.ContainerUmount(container, container.Path())
-	}
-
-	err = container.DeferTemplateApply("create")
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageZfs) ContainerCreateFromImage(container instance.Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	logger.Debugf("Creating ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	containerPath := container.Path()
-	containerName := container.Name()
-	volumeName := project.Prefix(container.Project(), containerName)
-	fs := fmt.Sprintf("containers/%s", volumeName)
-	containerPoolVolumeMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, containerName)
-
-	poolName := s.getOnDiskPoolName()
-	fsImage := fmt.Sprintf("images/%s", fingerprint)
-
-	imageStoragePoolLockID := getImageCreateLockID(s.pool.Name, fingerprint)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[imageStoragePoolLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-	} else {
-		lxdStorageOngoingOperationMap[imageStoragePoolLockID] = make(chan bool)
-		lxdStorageMapLock.Unlock()
-
-		var imgerr error
-		if !zfsFilesystemEntityExists(poolName, fmt.Sprintf("%s@readonly", fsImage)) {
-			imgerr = s.ImageCreate(fingerprint, tracker)
-		}
-
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[imageStoragePoolLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, imageStoragePoolLockID)
-		}
-		lxdStorageMapLock.Unlock()
-
-		if imgerr != nil {
-			return imgerr
-		}
-	}
-
-	err := zfsPoolVolumeClone(container.Project(), poolName, fsImage, "readonly", fs, containerPoolVolumeMntPoint)
-	if err != nil {
-		return err
-	}
-
-	revert := true
-	defer func() {
-		if !revert {
-			return
-		}
-		s.ContainerDelete(container)
-	}()
-
-	ourMount, err := s.ContainerMount(container)
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer s.ContainerUmount(container, containerPath)
-	}
-
-	privileged := container.IsPrivileged()
-	err = driver.CreateContainerMountpoint(containerPoolVolumeMntPoint, containerPath, privileged)
-	if err != nil {
-		return err
-	}
-
-	err = container.DeferTemplateApply("create")
-	if err != nil {
-		return err
-	}
-
-	revert = false
-
-	logger.Debugf("Created ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) ContainerDelete(container instance.Instance) error {
-	err := s.doContainerDelete(container.Project(), container.Name())
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageZfs) copyWithoutSnapshotsSparse(target instance.Instance, source instance.Instance) error {
-	poolName := s.getOnDiskPoolName()
-
-	sourceContainerName := source.Name()
-	sourceContainerPath := source.Path()
-
-	targetContainerName := target.Name()
-	targetContainerPath := target.Path()
-	targetContainerMountPoint := driver.GetContainerMountPoint(target.Project(), s.pool.Name, targetContainerName)
-
-	sourceZfsDataset := ""
-	sourceZfsDatasetSnapshot := ""
-	sourceName, sourceSnapOnlyName, isSnapshotName := shared.InstanceGetParentAndSnapshotName(sourceContainerName)
-
-	targetZfsDataset := fmt.Sprintf("containers/%s", project.Prefix(target.Project(), targetContainerName))
-
-	if isSnapshotName {
-		sourceZfsDatasetSnapshot = sourceSnapOnlyName
-	}
-
-	revert := true
-	if sourceZfsDatasetSnapshot == "" {
-		if zfsFilesystemEntityExists(poolName, fmt.Sprintf("containers/%s", project.Prefix(source.Project(), sourceName))) {
-			sourceZfsDatasetSnapshot = fmt.Sprintf("copy-%s", uuid.NewRandom().String())
-			sourceZfsDataset = fmt.Sprintf("containers/%s", project.Prefix(source.Project(), sourceName))
-			err := zfsPoolVolumeSnapshotCreate(poolName, sourceZfsDataset, sourceZfsDatasetSnapshot)
-			if err != nil {
-				return err
-			}
-			defer func() {
-				if !revert {
-					return
-				}
-				zfsPoolVolumeSnapshotDestroy(poolName, sourceZfsDataset, sourceZfsDatasetSnapshot)
-			}()
-		}
-	} else {
-		if zfsFilesystemEntityExists(poolName, fmt.Sprintf("containers/%s@snapshot-%s", project.Prefix(source.Project(), sourceName), sourceZfsDatasetSnapshot)) {
-			sourceZfsDataset = fmt.Sprintf("containers/%s", project.Prefix(source.Project(), sourceName))
-			sourceZfsDatasetSnapshot = fmt.Sprintf("snapshot-%s", sourceZfsDatasetSnapshot)
-		}
-	}
-
-	if sourceZfsDataset != "" {
-		err := zfsPoolVolumeClone(target.Project(), poolName, sourceZfsDataset, sourceZfsDatasetSnapshot, targetZfsDataset, targetContainerMountPoint)
-		if err != nil {
-			return err
-		}
-		defer func() {
-			if !revert {
-				return
-			}
-			zfsPoolVolumeDestroy(poolName, targetZfsDataset)
-		}()
-
-		ourMount, err := s.ContainerMount(target)
-		if err != nil {
-			return err
-		}
-		if ourMount {
-			defer s.ContainerUmount(target, targetContainerPath)
-		}
-
-		err = driver.CreateContainerMountpoint(targetContainerMountPoint, targetContainerPath, target.IsPrivileged())
-		if err != nil {
-			return err
-		}
-		defer func() {
-			if !revert {
-				return
-			}
-			deleteContainerMountpoint(targetContainerMountPoint, targetContainerPath, s.GetStorageTypeName())
-		}()
-	} else {
-		err := s.ContainerCreate(target)
-		if err != nil {
-			return err
-		}
-		defer func() {
-			if !revert {
-				return
-			}
-			s.ContainerDelete(target)
-		}()
-
-		bwlimit := s.pool.Config["rsync.bwlimit"]
-		output, err := rsync.LocalCopy(sourceContainerPath, targetContainerPath, bwlimit, true)
-		if err != nil {
-			return fmt.Errorf("rsync failed: %s", string(output))
-		}
-	}
-
-	revert = false
-
-	return nil
-}
-
-func (s *storageZfs) copyWithoutSnapshotFull(target instance.Instance, source instance.Instance) error {
-	logger.Debugf("Creating full ZFS copy \"%s\" to \"%s\"", source.Name(), target.Name())
-
-	sourceIsSnapshot := source.IsSnapshot()
-	poolName := s.getOnDiskPoolName()
-
-	sourceName := source.Name()
-	sourceDataset := ""
-	snapshotSuffix := ""
-
-	targetName := target.Name()
-	targetDataset := fmt.Sprintf("%s/containers/%s", poolName, project.Prefix(target.Project(), targetName))
-	targetSnapshotDataset := ""
-
-	if sourceIsSnapshot {
-		sourceParentName, sourceSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(source.Name())
-		snapshotSuffix = fmt.Sprintf("snapshot-%s", sourceSnapOnlyName)
-		sourceDataset = fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(source.Project(), sourceParentName), snapshotSuffix)
-		targetSnapshotDataset = fmt.Sprintf("%s/containers/%s@snapshot-%s", poolName, project.Prefix(target.Project(), targetName), sourceSnapOnlyName)
-	} else {
-		snapshotSuffix = uuid.NewRandom().String()
-		sourceDataset = fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(source.Project(), sourceName), snapshotSuffix)
-		targetSnapshotDataset = fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(target.Project(), targetName), snapshotSuffix)
-
-		fs := fmt.Sprintf("containers/%s", project.Prefix(source.Project(), sourceName))
-		err := zfsPoolVolumeSnapshotCreate(poolName, fs, snapshotSuffix)
-		if err != nil {
-			return err
-		}
-		defer func() {
-			err := zfsPoolVolumeSnapshotDestroy(poolName, fs, snapshotSuffix)
-			if err != nil {
-				logger.Warnf("Failed to delete temporary ZFS snapshot \"%s\", manual cleanup needed", sourceDataset)
-			}
-		}()
-	}
-
-	zfsSendCmd := exec.Command("zfs", "send", sourceDataset)
-
-	zfsRecvCmd := exec.Command("zfs", "receive", targetDataset)
-
-	zfsRecvCmd.Stdin, _ = zfsSendCmd.StdoutPipe()
-	zfsRecvCmd.Stdout = os.Stdout
-	zfsRecvCmd.Stderr = os.Stderr
-
-	err := zfsRecvCmd.Start()
-	if err != nil {
-		return err
-	}
-
-	err = zfsSendCmd.Run()
-	if err != nil {
-		return err
-	}
-
-	err = zfsRecvCmd.Wait()
-	if err != nil {
-		return err
-	}
-
-	msg, err := shared.RunCommand("zfs", "rollback", "-r", "-R", targetSnapshotDataset)
-	if err != nil {
-		logger.Errorf("Failed to rollback ZFS dataset: %s", msg)
-		return err
-	}
-
-	targetContainerMountPoint := driver.GetContainerMountPoint(target.Project(), s.pool.Name, targetName)
-	targetfs := fmt.Sprintf("containers/%s", project.Prefix(target.Project(), targetName))
-
-	err = zfsPoolVolumeSet(poolName, targetfs, "canmount", "noauto")
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSet(poolName, targetfs, "mountpoint", targetContainerMountPoint)
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSnapshotDestroy(poolName, targetfs, snapshotSuffix)
-	if err != nil {
-		return err
-	}
-
-	ourMount, err := s.ContainerMount(target)
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer s.ContainerUmount(target, targetContainerMountPoint)
-	}
-
-	err = driver.CreateContainerMountpoint(targetContainerMountPoint, target.Path(), target.IsPrivileged())
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Created full ZFS copy \"%s\" to \"%s\"", source.Name(), target.Name())
-	return nil
-}
-
-func (s *storageZfs) copyWithSnapshots(target instance.Instance, source instance.Instance, parentSnapshot string) error {
-	sourceName := source.Name()
-	targetParentName, targetSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(target.Name())
-	containersPath := driver.GetSnapshotMountPoint(target.Project(), s.pool.Name, targetParentName)
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(target.Project(), targetParentName))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(target.Project(), targetParentName))
-	err := driver.CreateSnapshotMountpoint(containersPath, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-	if err != nil {
-		return err
-	}
-
-	poolName := s.getOnDiskPoolName()
-	sourceParentName, sourceSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(sourceName)
-	currentSnapshotDataset := fmt.Sprintf("%s/containers/%s@snapshot-%s", poolName, project.Prefix(source.Project(), sourceParentName), sourceSnapOnlyName)
-	args := []string{"send", currentSnapshotDataset}
-	if parentSnapshot != "" {
-		parentName, parentSnaponlyName, _ := shared.InstanceGetParentAndSnapshotName(parentSnapshot)
-		parentSnapshotDataset := fmt.Sprintf("%s/containers/%s@snapshot-%s", poolName, project.Prefix(source.Project(), parentName), parentSnaponlyName)
-		args = append(args, "-i", parentSnapshotDataset)
-	}
-
-	zfsSendCmd := exec.Command("zfs", args...)
-	targetSnapshotDataset := fmt.Sprintf("%s/containers/%s@snapshot-%s", poolName, project.Prefix(target.Project(), targetParentName), targetSnapOnlyName)
-	zfsRecvCmd := exec.Command("zfs", "receive", "-F", targetSnapshotDataset)
-
-	zfsRecvCmd.Stdin, _ = zfsSendCmd.StdoutPipe()
-	zfsRecvCmd.Stdout = os.Stdout
-	zfsRecvCmd.Stderr = os.Stderr
-
-	err = zfsRecvCmd.Start()
-	if err != nil {
-		return err
-	}
-
-	err = zfsSendCmd.Run()
-	if err != nil {
-		return err
-	}
-
-	err = zfsRecvCmd.Wait()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
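copyWithSnapshots transfers one snapshot at a time, adding `-i <parent>` once a parent exists so each stream carries only the blocks changed since the previous snapshot. A sketch of the argument construction under that assumption (dataset and snapshot names are illustrative):

    // sendArgs builds the zfs send arguments for one snapshot in a
    // chain. With a parent, the resulting stream is incremental.
    func sendArgs(dataset, snap, parent string) []string {
    	args := []string{"send"}
    	if parent != "" {
    		args = append(args, "-i", dataset+"@"+parent)
    	}
    	return append(args, dataset+"@"+snap)
    }

The caller walks the snapshots oldest to newest, passing the previous snapshot as the parent, so the receiving side can rebuild the whole chain with `zfs receive -F`.
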
-func (s *storageZfs) doCrossPoolContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
-	sourcePool, err := source.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	// setup storage for the source volume
-	srcStorage, err := storagePoolVolumeInit(s.s, "default", sourcePool, source.Name(), storagePoolVolumeTypeContainer)
-	if err != nil {
-		return err
-	}
-
-	ourMount, err := srcStorage.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer srcStorage.StoragePoolUmount()
-	}
-
-	targetPool, err := target.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	var snapshots []instance.Instance
-
-	if refresh {
-		snapshots = refreshSnapshots
-	} else {
-		snapshots, err = source.Snapshots()
-		if err != nil {
-			return err
-		}
-
-		// create the main container
-		err = s.doContainerCreate(target.Project(), target.Name(), target.IsPrivileged())
-		if err != nil {
-			return err
-		}
-	}
-
-	_, err = s.doContainerMount(target.Project(), target.Name(), target.IsPrivileged())
-	if err != nil {
-		return err
-	}
-	defer s.ContainerUmount(target, shared.VarPath("containers", project.Prefix(target.Project(), target.Name())))
-
-	destContainerMntPoint := driver.GetContainerMountPoint(target.Project(), targetPool, target.Name())
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	if !containerOnly {
-		for _, snap := range snapshots {
-			srcSnapshotMntPoint := driver.GetSnapshotMountPoint(target.Project(), sourcePool, snap.Name())
-			_, err = rsync.LocalCopy(srcSnapshotMntPoint, destContainerMntPoint, bwlimit, true)
-			if err != nil {
-				logger.Errorf("Failed to rsync into ZFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-				return err
-			}
-
-			// create snapshot
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-			err = s.doContainerSnapshotCreate(snap.Project(), fmt.Sprintf("%s/%s", target.Name(), snapOnlyName), target.Name())
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	srcContainerMntPoint := driver.GetContainerMountPoint(source.Project(), sourcePool, source.Name())
-	_, err = rsync.LocalCopy(srcContainerMntPoint, destContainerMntPoint, bwlimit, true)
-	if err != nil {
-		logger.Errorf("Failed to rsync into ZFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageZfs) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
-	logger.Debugf("Copying ZFS container storage %s to %s", source.Name(), target.Name())
-
-	ourStart, err := source.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer source.StorageStop()
-	}
-
-	if source.Type() != instancetype.Container {
-		return fmt.Errorf("Source Instance type must be container")
-	}
-
-	if target.Type() != instancetype.Container {
-		return fmt.Errorf("Target Instance type must be container")
-	}
-
-	srcCt := source.(*containerLXC)
-	targetCt := target.(*containerLXC)
-
-	sourcePool, err := srcCt.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	targetPool, err := targetCt.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	if sourcePool != targetPool {
-		err := s.doCrossPoolContainerCopy(target, source, containerOnly, false, nil)
-		if err != nil {
-			return err
-		}
-
-		return target.DeferTemplateApply("copy")
-	}
-
-	snapshots, err := source.Snapshots()
-	if err != nil {
-		return err
-	}
-
-	if containerOnly || len(snapshots) == 0 {
-		if s.pool.Config["zfs.clone_copy"] != "" && !shared.IsTrue(s.pool.Config["zfs.clone_copy"]) {
-			err = s.copyWithoutSnapshotFull(target, source)
-			if err != nil {
-				return err
-			}
-		} else {
-			err = s.copyWithoutSnapshotsSparse(target, source)
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		targetContainerName := target.Name()
-		targetContainerPath := target.Path()
-		targetContainerMountPoint := driver.GetContainerMountPoint(target.Project(), s.pool.Name, targetContainerName)
-		err = driver.CreateContainerMountpoint(targetContainerMountPoint, targetContainerPath, target.IsPrivileged())
-		if err != nil {
-			return err
-		}
-
-		prev := ""
-		prevSnapOnlyName := ""
-		for i, snap := range snapshots {
-			if i > 0 {
-				prev = snapshots[i-1].Name()
-			}
-
-			sourceSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), snap.Name())
-			if err != nil {
-				return err
-			}
-
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-			prevSnapOnlyName = snapOnlyName
-			newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
-			targetSnapshot, err := instance.LoadByProjectAndName(s.s, target.Project(), newSnapName)
-			if err != nil {
-				return err
-			}
-
-			err = s.copyWithSnapshots(targetSnapshot, sourceSnapshot, prev)
-			if err != nil {
-				return err
-			}
-		}
-
-		poolName := s.getOnDiskPoolName()
-
-		// send actual container
-		tmpSnapshotName := fmt.Sprintf("copy-send-%s", uuid.NewRandom().String())
-		err = zfsPoolVolumeSnapshotCreate(poolName, fmt.Sprintf("containers/%s", project.Prefix(source.Project(), source.Name())), tmpSnapshotName)
-		if err != nil {
-			return err
-		}
-
-		currentSnapshotDataset := fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(source.Project(), source.Name()), tmpSnapshotName)
-		args := []string{"send", currentSnapshotDataset}
-		if prevSnapOnlyName != "" {
-			parentSnapshotDataset := fmt.Sprintf("%s/containers/%s@snapshot-%s", poolName, project.Prefix(source.Project(), source.Name()), prevSnapOnlyName)
-			args = append(args, "-i", parentSnapshotDataset)
-		}
-
-		zfsSendCmd := exec.Command("zfs", args...)
-		targetSnapshotDataset := fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(target.Project(), target.Name()), tmpSnapshotName)
-		zfsRecvCmd := exec.Command("zfs", "receive", "-F", targetSnapshotDataset)
-
-		zfsRecvCmd.Stdin, _ = zfsSendCmd.StdoutPipe()
-		zfsRecvCmd.Stdout = os.Stdout
-		zfsRecvCmd.Stderr = os.Stderr
-
-		err = zfsRecvCmd.Start()
-		if err != nil {
-			return err
-		}
-
-		err = zfsSendCmd.Run()
-		if err != nil {
-			return err
-		}
-
-		err = zfsRecvCmd.Wait()
-		if err != nil {
-			return err
-		}
-
-		zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(source.Project(), source.Name())), tmpSnapshotName)
-		zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(target.Project(), target.Name())), tmpSnapshotName)
-
-		fs := fmt.Sprintf("containers/%s", project.Prefix(target.Project(), target.Name()))
-		err = zfsPoolVolumeSet(poolName, fs, "canmount", "noauto")
-		if err != nil {
-			return err
-		}
-
-		err = zfsPoolVolumeSet(poolName, fs, "mountpoint", targetContainerMountPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = target.DeferTemplateApply("copy")
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Copied ZFS container storage %s to %s", source.Name(), target.Name())
-	return nil
-}
-
-func (s *storageZfs) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
-	logger.Debugf("Refreshing ZFS container storage for %s from %s", target.Name(), source.Name())
-
-	ourStart, err := source.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer source.StorageStop()
-	}
-
-	return s.doCrossPoolContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
-}
-
-func (s *storageZfs) ContainerRename(container instance.Instance, newName string) error {
-	logger.Debugf("Renaming ZFS storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-
-	poolName := s.getOnDiskPoolName()
-	oldName := container.Name()
-
-	// Unmount the dataset.
-	_, err := s.ContainerUmount(container, "")
-	if err != nil {
-		return err
-	}
-
-	// Rename the dataset.
-	oldZfsDataset := fmt.Sprintf("containers/%s", project.Prefix(container.Project(), oldName))
-	newZfsDataset := fmt.Sprintf("containers/%s", project.Prefix(container.Project(), newName))
-	err = zfsPoolVolumeRename(poolName, oldZfsDataset, newZfsDataset, false)
-	if err != nil {
-		return err
-	}
-	revert := true
-	defer func() {
-		if !revert {
-			return
-		}
-		s.ContainerRename(container, oldName)
-	}()
-
-	// Set the new mountpoint for the dataset.
-	newContainerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, newName)
-	err = zfsPoolVolumeSet(poolName, newZfsDataset, "mountpoint", newContainerMntPoint)
-	if err != nil {
-		return err
-	}
-
-	// Update the in-memory container name and unmount the dataset again.
-	container.(*containerLXC).name = newName
-	_, err = s.ContainerUmount(container, "")
-	if err != nil {
-		return err
-	}
-
-	// Rename the mountpoint on the storage pool and update the container symlink.
-	oldContainerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, oldName)
-	oldContainerMntPointSymlink := container.Path()
-	newContainerMntPointSymlink := shared.VarPath("containers", project.Prefix(container.Project(), newName))
-	err = renameContainerMountpoint(oldContainerMntPoint, oldContainerMntPointSymlink, newContainerMntPoint, newContainerMntPointSymlink)
-	if err != nil {
-		return err
-	}
-
-	// Rename the snapshot mountpoint on the storage pool.
-	oldSnapshotMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, oldName)
-	newSnapshotMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, newName)
-	if shared.PathExists(oldSnapshotMntPoint) {
-		err := os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Remove old symlink.
-	oldSnapshotPath := shared.VarPath("snapshots", project.Prefix(container.Project(), oldName))
-	if shared.PathExists(oldSnapshotPath) {
-		err := os.Remove(oldSnapshotPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Create new symlink.
-	newSnapshotPath := shared.VarPath("snapshots", project.Prefix(container.Project(), newName))
-	if shared.PathExists(newSnapshotPath) {
-		err := os.Symlink(newSnapshotMntPoint, newSnapshotPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	revert = false
-
-	logger.Debugf("Renamed ZFS storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-	return nil
-}
-
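ContainerRename relies on a `revert` flag plus a deferred closure: the rename is undone automatically on any early return, and the defer is disarmed only once every step has succeeded. The same pattern in isolation (the do/undo/step functions are hypothetical):

    // withRevert runs do() and rolls it back via undo() unless the
    // caller reaches the end of the critical section.
    func withRevert(do, undo func() error, steps ...func() error) error {
    	if err := do(); err != nil {
    		return err
    	}

    	revert := true
    	defer func() {
    		if revert {
    			undo() // best-effort rollback
    		}
    	}()

    	for _, step := range steps {
    		if err := step(); err != nil {
    			return err // rollback fires via the defer
    		}
    	}

    	revert = false // success: disarm the rollback
    	return nil
    }
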
-func (s *storageZfs) ContainerRestore(target instance.Instance, source instance.Instance) error {
-	logger.Debugf("Restoring ZFS storage volume for container \"%s\" from %s to %s", s.volume.Name, source.Name(), target.Name())
-
-	snaps, err := target.Snapshots()
-	if err != nil {
-		return err
-	}
-
-	if snaps[len(snaps)-1].Name() != source.Name() {
-		if s.pool.Config["volume.zfs.remove_snapshots"] != "" {
-			zfsRemoveSnapshots = s.pool.Config["volume.zfs.remove_snapshots"]
-		}
-
-		if s.volume.Config["zfs.remove_snapshots"] != "" {
-			zfsRemoveSnapshots = s.volume.Config["zfs.remove_snapshots"]
-		}
-
-		if !shared.IsTrue(zfsRemoveSnapshots) {
-			return fmt.Errorf("ZFS can only restore from the latest snapshot. Delete newer snapshots or copy the snapshot into a new container instead")
-		}
-	}
-
-	// Start storage for source container
-	ourSourceStart, err := source.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourSourceStart {
-		defer source.StorageStop()
-	}
-
-	// Start storage for target container
-	ourTargetStart, err := target.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourTargetStart {
-		defer target.StorageStop()
-	}
-
-	for i := len(snaps) - 1; i != 0; i-- {
-		if snaps[i].Name() == source.Name() {
-			break
-		}
-
-		err := snaps[i].Delete()
-		if err != nil {
-			return err
-		}
-	}
-
-	// Restore the snapshot
-	cName, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(source.Name())
-	snapName := fmt.Sprintf("snapshot-%s", snapOnlyName)
-
-	err = zfsPoolVolumeSnapshotRestore(s.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(source.Project(), cName)), snapName)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Restored ZFS storage volume for container \"%s\" from %s to %s", s.volume.Name, source.Name(), target.Name())
-	return nil
-}
-
-func (s *storageZfs) ContainerGetUsage(container instance.Instance) (int64, error) {
-	var err error
-
-	fs := fmt.Sprintf("containers/%s", project.Prefix(container.Project(), container.Name()))
-
-	property := "used"
-
-	if s.pool.Config["volume.zfs.use_refquota"] != "" {
-		zfsUseRefquota = s.pool.Config["volume.zfs.use_refquota"]
-	}
-	if s.volume.Config["zfs.use_refquota"] != "" {
-		zfsUseRefquota = s.volume.Config["zfs.use_refquota"]
-	}
-
-	if shared.IsTrue(zfsUseRefquota) {
-		property = "referenced"
-	}
-
-	// Shortcut for refquota
-	mountpoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, container.Name())
-	if property == "referenced" && shared.IsMountPoint(mountpoint) {
-		var stat unix.Statfs_t
-		err := unix.Statfs(mountpoint, &stat)
-		if err != nil {
-			return -1, err
-		}
-
-		return int64(stat.Blocks-stat.Bfree) * int64(stat.Bsize), nil
-	}
-
-	value, err := zfsFilesystemEntityPropertyGet(s.getOnDiskPoolName(), fs, property)
-	if err != nil {
-		return -1, err
-	}
-
-	valueInt, err := strconv.ParseInt(value, 10, 64)
-	if err != nil {
-		return -1, err
-	}
-
-	return valueInt, nil
-}
-
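When refquota is active and the dataset is mounted, ContainerGetUsage skips the `zfs get` round-trip and reads usage straight from the kernel via statfs. A self-contained sketch of that shortcut (Linux-only; the mount point is a placeholder):

    import "golang.org/x/sys/unix"

    // usedBytes mirrors the statfs fast path above: total blocks
    // minus free blocks, scaled by the filesystem block size.
    func usedBytes(mountpoint string) (int64, error) {
    	var st unix.Statfs_t
    	if err := unix.Statfs(mountpoint, &st); err != nil {
    		return -1, err
    	}
    	return int64(st.Blocks-st.Bfree) * int64(st.Bsize), nil
    }
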
-func (s *storageZfs) doContainerSnapshotCreate(projectName, targetName string, sourceName string) error {
-	snapshotContainerName := targetName
-	logger.Debugf("Creating ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", snapshotContainerName, s.pool.Name)
-
-	sourceContainerName := sourceName
-
-	cName, snapshotSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapshotContainerName)
-	snapName := fmt.Sprintf("snapshot-%s", snapshotSnapOnlyName)
-
-	sourceZfsDataset := fmt.Sprintf("containers/%s", project.Prefix(projectName, cName))
-	err := zfsPoolVolumeSnapshotCreate(s.getOnDiskPoolName(), sourceZfsDataset, snapName)
-	if err != nil {
-		return err
-	}
-
-	snapshotMntPoint := driver.GetSnapshotMountPoint(projectName, s.pool.Name, snapshotContainerName)
-	if !shared.PathExists(snapshotMntPoint) {
-		err := os.MkdirAll(snapshotMntPoint, 0100)
-		if err != nil {
-			return err
-		}
-	}
-
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(projectName, sourceName))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceContainerName))
-	if !shared.PathExists(snapshotMntPointSymlink) {
-		err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Created ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", snapshotContainerName, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
-	err := s.doContainerSnapshotCreate(sourceContainer.Project(), snapshotContainer.Name(), sourceContainer.Name())
-	if err != nil {
-		s.ContainerSnapshotDelete(snapshotContainer)
-		return err
-	}
-	return nil
-}
-
-func zfsSnapshotDeleteInternal(projectName, poolName string, ctName string, onDiskPoolName string) error {
-	sourceContainerName, sourceContainerSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(ctName)
-	snapName := fmt.Sprintf("snapshot-%s", sourceContainerSnapOnlyName)
-
-	if zfsFilesystemEntityExists(onDiskPoolName,
-		fmt.Sprintf("containers/%s@%s",
-			project.Prefix(projectName, sourceContainerName), snapName)) {
-		removable, err := zfsPoolVolumeSnapshotRemovable(onDiskPoolName,
-			fmt.Sprintf("containers/%s",
-				project.Prefix(projectName, sourceContainerName)),
-			snapName)
-		if err != nil {
-			return err
-		}
-
-		if removable {
-			err = zfsPoolVolumeSnapshotDestroy(onDiskPoolName,
-				fmt.Sprintf("containers/%s",
-					project.Prefix(projectName, sourceContainerName)),
-				snapName)
-		} else {
-			err = zfsPoolVolumeSnapshotRename(onDiskPoolName,
-				fmt.Sprintf("containers/%s",
-					project.Prefix(projectName, sourceContainerName)),
-				snapName,
-				fmt.Sprintf("copy-%s", uuid.NewRandom().String()))
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	// Delete the snapshot on its storage pool:
-	// ${POOL}/snapshots/<snapshot_name>
-	snapshotContainerMntPoint := driver.GetSnapshotMountPoint(projectName, poolName, ctName)
-	if shared.PathExists(snapshotContainerMntPoint) {
-		err := os.RemoveAll(snapshotContainerMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Check if we can remove the snapshot symlink:
-	// ${LXD_DIR}/snapshots/<container_name> to ${POOL}/snapshots/<container_name>
-	// by checking if the directory is empty.
-	snapshotContainerPath := driver.GetSnapshotMountPoint(projectName, poolName, sourceContainerName)
-	empty, _ := shared.PathIsEmpty(snapshotContainerPath)
-	if empty {
-		// Remove the snapshot directory for the container:
-		// ${POOL}/snapshots/<source_container_name>
-		err := os.Remove(snapshotContainerPath)
-		if err != nil {
-			return err
-		}
-
-		snapshotSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceContainerName))
-		if shared.PathExists(snapshotSymlink) {
-			err := os.Remove(snapshotSymlink)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Legacy
-	snapPath := shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", project.Prefix(projectName, sourceContainerName), sourceContainerSnapOnlyName))
-	if shared.PathExists(snapPath) {
-		err := os.Remove(snapPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Legacy
-	parent := shared.VarPath(fmt.Sprintf("snapshots/%s", project.Prefix(projectName, sourceContainerName)))
-	if ok, _ := shared.PathIsEmpty(parent); ok {
-		err := os.Remove(parent)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (s *storageZfs) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
-	logger.Debugf("Deleting ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	poolName := s.getOnDiskPoolName()
-	err := zfsSnapshotDeleteInternal(snapshotContainer.Project(), s.pool.Name, snapshotContainer.Name(),
-		poolName)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Deleted ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) ContainerSnapshotRename(snapshotContainer instance.Instance, newName string) error {
-	logger.Debugf("Renaming ZFS storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-
-	oldName := snapshotContainer.Name()
-
-	oldcName, oldSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapshotContainer.Name())
-	oldZfsDatasetName := fmt.Sprintf("snapshot-%s", oldSnapOnlyName)
-
-	_, newSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(newName)
-	newZfsDatasetName := fmt.Sprintf("snapshot-%s", newSnapOnlyName)
-
-	if oldZfsDatasetName != newZfsDatasetName {
-		err := zfsPoolVolumeSnapshotRename(
-			s.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(snapshotContainer.Project(), oldcName)), oldZfsDatasetName, newZfsDatasetName)
-		if err != nil {
-			return err
-		}
-	}
-	revert := true
-	defer func() {
-		if !revert {
-			return
-		}
-		//s.ContainerSnapshotRename(snapshotContainer, oldName)
-	}()
-
-	oldStyleSnapshotMntPoint := shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", project.Prefix(snapshotContainer.Project(), oldcName), oldSnapOnlyName))
-	if shared.PathExists(oldStyleSnapshotMntPoint) {
-		err := os.Remove(oldStyleSnapshotMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	oldSnapshotMntPoint := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, oldName)
-	if shared.PathExists(oldSnapshotMntPoint) {
-		err := os.Remove(oldSnapshotMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	newSnapshotMntPoint := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, newName)
-	if !shared.PathExists(newSnapshotMntPoint) {
-		err := os.MkdirAll(newSnapshotMntPoint, 0100)
-		if err != nil {
-			return err
-		}
-	}
-
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(snapshotContainer.Project(), oldcName))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(snapshotContainer.Project(), oldcName))
-	if !shared.PathExists(snapshotMntPointSymlink) {
-		err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	revert = false
-
-	logger.Debugf("Renamed ZFS storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newName)
-	return nil
-}
-
-func (s *storageZfs) ContainerSnapshotStart(container instance.Instance) (bool, error) {
-	logger.Debugf("Initializing ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	cName, sName, _ := shared.InstanceGetParentAndSnapshotName(container.Name())
-	sourceFs := fmt.Sprintf("containers/%s", project.Prefix(container.Project(), cName))
-	sourceSnap := fmt.Sprintf("snapshot-%s", sName)
-	destFs := fmt.Sprintf("snapshots/%s/%s", project.Prefix(container.Project(), cName), sName)
-
-	poolName := s.getOnDiskPoolName()
-	snapshotMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, container.Name())
-	err := zfsPoolVolumeClone(container.Project(), poolName, sourceFs, sourceSnap, destFs, snapshotMntPoint)
-	if err != nil {
-		return false, err
-	}
-
-	err = zfsMount(poolName, destFs)
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Initialized ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageZfs) ContainerSnapshotStop(container instance.Instance) (bool, error) {
-	logger.Debugf("Stopping ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	cName, sName, _ := shared.InstanceGetParentAndSnapshotName(container.Name())
-	destFs := fmt.Sprintf("snapshots/%s/%s", project.Prefix(container.Project(), cName), sName)
-
-	err := zfsPoolVolumeDestroy(s.getOnDiskPoolName(), destFs)
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Stopped ZFS storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageZfs) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
-	/* don't touch the fs yet, as migration will do that for us */
-	return nil
-}
-
-func (s *storageZfs) doContainerOnlyBackup(tmpPath string, backup backup.Backup, source instance.Instance) error {
-	sourceIsSnapshot := source.IsSnapshot()
-	poolName := s.getOnDiskPoolName()
-
-	sourceName := source.Name()
-	sourceDataset := ""
-	snapshotSuffix := ""
-
-	if sourceIsSnapshot {
-		sourceParentName, sourceSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(source.Name())
-		snapshotSuffix = fmt.Sprintf("backup-%s", sourceSnapOnlyName)
-		sourceDataset = fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(source.Project(), sourceParentName), snapshotSuffix)
-	} else {
-		snapshotSuffix = uuid.NewRandom().String()
-		sourceDataset = fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(source.Project(), sourceName), snapshotSuffix)
-
-		fs := fmt.Sprintf("containers/%s", project.Prefix(source.Project(), sourceName))
-		err := zfsPoolVolumeSnapshotCreate(poolName, fs, snapshotSuffix)
-		if err != nil {
-			return err
-		}
-
-		defer func() {
-			err := zfsPoolVolumeSnapshotDestroy(poolName, fs, snapshotSuffix)
-			if err != nil {
-				logger.Warnf("Failed to delete temporary ZFS snapshot \"%s\", manual cleanup needed", sourceDataset)
-			}
-		}()
-	}
-
-	// Dump the container to a file
-	backupFile := fmt.Sprintf("%s/%s", tmpPath, "container.bin")
-	f, err := os.OpenFile(backupFile, os.O_RDWR|os.O_CREATE, 0644)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	zfsSendCmd := exec.Command("zfs", "send", sourceDataset)
-	zfsSendCmd.Stdout = f
-	err = zfsSendCmd.Run()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
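For backups the send stream is not piped to another process but written straight to disk by pointing the command's stdout at an open file. Reduced to its essence (path and dataset name are illustrative):

    import (
    	"os"
    	"os/exec"
    )

    // dumpDataset writes a raw `zfs send` stream to a backup file.
    func dumpDataset(dataset, path string) error {
    	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
    	if err != nil {
    		return err
    	}
    	defer f.Close()

    	cmd := exec.Command("zfs", "send", dataset)
    	cmd.Stdout = f // fd handed to the child; nothing buffers in Go
    	return cmd.Run()
    }

The resulting container.bin and per-snapshot .bin files can later be replayed with `zfs receive`, which is what the optimized restore path below does.
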
-func (s *storageZfs) doSnapshotBackup(tmpPath string, backup backup.Backup, source instance.Instance, parentSnapshot string) error {
-	sourceName := source.Name()
-	snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
-
-	// Create backup path for snapshots
-	err := os.MkdirAll(snapshotsPath, 0711)
-	if err != nil {
-		return err
-	}
-
-	poolName := s.getOnDiskPoolName()
-	sourceParentName, sourceSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(sourceName)
-	currentSnapshotDataset := fmt.Sprintf("%s/containers/%s@snapshot-%s", poolName, project.Prefix(source.Project(), sourceParentName), sourceSnapOnlyName)
-	args := []string{"send", currentSnapshotDataset}
-	if parentSnapshot != "" {
-		parentName, parentSnaponlyName, _ := shared.InstanceGetParentAndSnapshotName(parentSnapshot)
-		parentSnapshotDataset := fmt.Sprintf("%s/containers/%s@snapshot-%s", poolName, project.Prefix(source.Project(), parentName), parentSnaponlyName)
-		args = append(args, "-i", parentSnapshotDataset)
-	}
-
-	backupFile := fmt.Sprintf("%s/%s.bin", snapshotsPath, sourceSnapOnlyName)
-	f, err := os.OpenFile(backupFile, os.O_RDWR|os.O_CREATE, 0644)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	zfsSendCmd := exec.Command("zfs", args...)
-	zfsSendCmd.Stdout = f
-	return zfsSendCmd.Run()
-}
-
-func (s *storageZfs) doContainerBackupCreateOptimized(tmpPath string, backup backup.Backup, source instance.Instance) error {
-	// Handle snapshots
-	snapshots, err := source.Snapshots()
-	if err != nil {
-		return err
-	}
-
-	if backup.InstanceOnly() || len(snapshots) == 0 {
-		err = s.doContainerOnlyBackup(tmpPath, backup, source)
-	} else {
-		prev := ""
-		prevSnapOnlyName := ""
-		for i, snap := range snapshots {
-			if i > 0 {
-				prev = snapshots[i-1].Name()
-			}
-
-			sourceSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), snap.Name())
-			if err != nil {
-				return err
-			}
-
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-			prevSnapOnlyName = snapOnlyName
-			err = s.doSnapshotBackup(tmpPath, backup, sourceSnapshot, prev)
-			if err != nil {
-				return err
-			}
-		}
-
-		// Dump the container to a file
-		poolName := s.getOnDiskPoolName()
-		tmpSnapshotName := fmt.Sprintf("backup-%s", uuid.NewRandom().String())
-		err = zfsPoolVolumeSnapshotCreate(poolName, fmt.Sprintf("containers/%s", project.Prefix(source.Project(), source.Name())), tmpSnapshotName)
-		if err != nil {
-			return err
-		}
-
-		currentSnapshotDataset := fmt.Sprintf("%s/containers/%s@%s", poolName, project.Prefix(source.Project(), source.Name()), tmpSnapshotName)
-		args := []string{"send", currentSnapshotDataset}
-		if prevSnapOnlyName != "" {
-			parentSnapshotDataset := fmt.Sprintf("%s/containers/%s@snapshot-%s", poolName, project.Prefix(source.Project(), source.Name()), prevSnapOnlyName)
-			args = append(args, "-i", parentSnapshotDataset)
-		}
-
-		backupFile := fmt.Sprintf("%s/container.bin", tmpPath)
-		f, err := os.OpenFile(backupFile, os.O_RDWR|os.O_CREATE, 0644)
-		if err != nil {
-			return err
-		}
-		defer f.Close()
-
-		zfsSendCmd := exec.Command("zfs", args...)
-		zfsSendCmd.Stdout = f
-
-		err = zfsSendCmd.Run()
-		if err != nil {
-			return err
-		}
-
-		zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(source.Project(), source.Name())), tmpSnapshotName)
-	}
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageZfs) doContainerBackupCreateVanilla(tmpPath string, backup backup.Backup, source instance.Instance) error {
-	// Prepare for rsync
-	rsync := func(oldPath string, newPath string, bwlimit string) error {
-		output, err := rsync.LocalCopy(oldPath, newPath, bwlimit, true)
-		if err != nil {
-			return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
-		}
-
-		return nil
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	projectName := source.Project()
-
-	// Handle snapshots
-	if !backup.InstanceOnly() {
-		snapshotsPath := fmt.Sprintf("%s/snapshots", tmpPath)
-
-		// Retrieve the snapshots
-		snapshots, err := source.Snapshots()
-		if err != nil {
-			return errors.Wrap(err, "Retrieve snapshots")
-		}
-
-		// Create the snapshot path
-		if len(snapshots) > 0 {
-			err = os.MkdirAll(snapshotsPath, 0711)
-			if err != nil {
-				return errors.Wrap(err, "Create snapshot path")
-			}
-		}
-
-		for _, snap := range snapshots {
-			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-
-			// Mount the snapshot to a usable path
-			_, err := s.ContainerSnapshotStart(snap)
-			if err != nil {
-				return errors.Wrap(err, "Mount snapshot")
-			}
-
-			snapshotMntPoint := driver.GetSnapshotMountPoint(projectName, s.pool.Name, snap.Name())
-			target := fmt.Sprintf("%s/%s", snapshotsPath, snapName)
-
-			// Copy the snapshot
-			err = rsync(snapshotMntPoint, target, bwlimit)
-			s.ContainerSnapshotStop(snap)
-			if err != nil {
-				return errors.Wrap(err, "Copy snapshot")
-			}
-		}
-	}
-
-	// Make a temporary copy of the container
-	containersPath := driver.GetContainerMountPoint("default", s.pool.Name, "")
-	tmpContainerMntPoint, err := ioutil.TempDir(containersPath, source.Name())
-	if err != nil {
-		return errors.Wrap(err, "Create temporary copy dir")
-	}
-	defer os.RemoveAll(tmpContainerMntPoint)
-
-	err = os.Chmod(tmpContainerMntPoint, 0100)
-	if err != nil {
-		return errors.Wrap(err, "Change temporary mount point permissions")
-	}
-
-	snapshotSuffix := uuid.NewRandom().String()
-	sourceName := source.Name()
-	fs := fmt.Sprintf("containers/%s", project.Prefix(projectName, sourceName))
-	sourceZfsDatasetSnapshot := fmt.Sprintf("snapshot-%s", snapshotSuffix)
-	poolName := s.getOnDiskPoolName()
-	err = zfsPoolVolumeSnapshotCreate(poolName, fs, sourceZfsDatasetSnapshot)
-	if err != nil {
-		return err
-	}
-	defer zfsPoolVolumeSnapshotDestroy(poolName, fs, sourceZfsDatasetSnapshot)
-
-	targetZfsDataset := fmt.Sprintf("containers/%s", snapshotSuffix)
-	err = zfsPoolVolumeClone(source.Project(), poolName, fs, sourceZfsDatasetSnapshot, targetZfsDataset, tmpContainerMntPoint)
-	if err != nil {
-		return errors.Wrap(err, "Clone volume")
-	}
-	defer zfsPoolVolumeDestroy(poolName, targetZfsDataset)
-
-	// Mount the temporary copy
-	if !shared.IsMountPoint(tmpContainerMntPoint) {
-		err = zfsMount(poolName, targetZfsDataset)
-		if err != nil {
-			return errors.Wrap(err, "Mount temporary copy")
-		}
-		defer zfsUmount(poolName, targetZfsDataset, tmpContainerMntPoint)
-	}
-
-	// Copy the container
-	containerPath := fmt.Sprintf("%s/container", tmpPath)
-	err = rsync(tmpContainerMntPoint, containerPath, bwlimit)
-	if err != nil {
-		return errors.Wrap(err, "Copy container")
-	}
-
-	return nil
-}
-
-func (s *storageZfs) ContainerBackupCreate(path string, backup backup.Backup, source instance.Instance) error {
-	// Generate the actual backup
-	if backup.OptimizedStorage() {
-		err := s.doContainerBackupCreateOptimized(path, backup, source)
-		if err != nil {
-			return errors.Wrap(err, "Optimized backup")
-		}
-	} else {
-		err := s.doContainerBackupCreateVanilla(path, backup, source)
-		if err != nil {
-			return errors.Wrap(err, "Vanilla backup")
-		}
-	}
-
-	return nil
-}
-
-func (s *storageZfs) doContainerBackupLoadOptimized(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	containerName, _, _ := shared.InstanceGetParentAndSnapshotName(info.Name)
-	containerMntPoint := driver.GetContainerMountPoint(info.Project, s.pool.Name, containerName)
-	err := driver.CreateContainerMountpoint(containerMntPoint, driver.InstancePath(instancetype.Container, info.Project, info.Name, false), info.Privileged)
-	if err != nil {
-		return err
-	}
-
-	unpackPath := fmt.Sprintf("%s/.backup", containerMntPoint)
-	err = os.MkdirAll(unpackPath, 0711)
-	if err != nil {
-		return err
-	}
-
-	err = os.Chmod(unpackPath, 0100)
-	if err != nil {
-		// can't use defer because it needs to run before the mount
-		os.RemoveAll(unpackPath)
-		return err
-	}
-
-	// Prepare tar arguments
-	args := append(tarArgs, []string{
-		"-",
-		"--strip-components=1",
-		"-C", unpackPath, "backup",
-	}...)
-
-	// Extract container
-	data.Seek(0, 0)
-	err = shared.RunCommandWithFds(data, nil, "tar", args...)
-	if err != nil {
-		// can't use defer because it needs to run before the mount
-		os.RemoveAll(unpackPath)
-		logger.Errorf("Failed to untar \"%s\" into \"%s\": %s", info.Name, unpackPath, err)
-		return err
-	}
-
-	poolName := s.getOnDiskPoolName()
-	for _, snapshotOnlyName := range info.Snapshots {
-		snapshotBackup := fmt.Sprintf("%s/snapshots/%s.bin", unpackPath, snapshotOnlyName)
-		feeder, err := os.Open(snapshotBackup)
-		if err != nil {
-			// can't use defer because it needs to run before the mount
-			os.RemoveAll(unpackPath)
-			return err
-		}
-
-		snapshotDataset := fmt.Sprintf("%s/containers/%s@snapshot-%s", poolName, project.Prefix(info.Project, containerName), snapshotOnlyName)
-		zfsRecvCmd := exec.Command("zfs", "receive", "-F", snapshotDataset)
-		zfsRecvCmd.Stdin = feeder
-		err = zfsRecvCmd.Run()
-		feeder.Close()
-		if err != nil {
-			// can't use defer because it needs to run before the mount
-			os.RemoveAll(unpackPath)
-			return err
-		}
-
-		// create mountpoint
-		snapshotMntPoint := driver.GetSnapshotMountPoint(info.Project, s.pool.Name, fmt.Sprintf("%s/%s", containerName, snapshotOnlyName))
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(info.Project, containerName))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(info.Project, containerName))
-		err = driver.CreateSnapshotMountpoint(snapshotMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-		if err != nil {
-			// can't use defer because it needs to run before the mount
-			os.RemoveAll(unpackPath)
-			return err
-		}
-	}
-
-	containerBackup := fmt.Sprintf("%s/container.bin", unpackPath)
-	feeder, err := os.Open(containerBackup)
-	if err != nil {
-		// can't use defer because it needs to run before the mount
-		os.RemoveAll(unpackPath)
-		return err
-	}
-	defer feeder.Close()
-
-	containerSnapshotDataset := fmt.Sprintf("%s/containers/%s@backup", poolName, project.Prefix(info.Project, containerName))
-	zfsRecvCmd := exec.Command("zfs", "receive", "-F", containerSnapshotDataset)
-	zfsRecvCmd.Stdin = feeder
-
-	err = zfsRecvCmd.Run()
-	os.RemoveAll(unpackPath)
-	zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(info.Project, containerName)), "backup")
-	if err != nil {
-		return err
-	}
-
-	fs := fmt.Sprintf("containers/%s", project.Prefix(info.Project, containerName))
-	err = zfsPoolVolumeSet(poolName, fs, "canmount", "noauto")
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSet(poolName, fs, "mountpoint", containerMntPoint)
-	if err != nil {
-		return err
-	}
-
-	_, err = s.doContainerMount(info.Project, containerName, info.Privileged)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageZfs) doContainerBackupLoadVanilla(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	// create the main container
-	err := s.doContainerCreate(info.Project, info.Name, info.Privileged)
-	if err != nil {
-		s.doContainerDelete(info.Project, info.Name)
-		return errors.Wrap(err, "Create container")
-	}
-
-	_, err = s.doContainerMount(info.Project, info.Name, info.Privileged)
-	if err != nil {
-		return errors.Wrap(err, "Mount container")
-	}
-
-	containerMntPoint := driver.GetContainerMountPoint(info.Project, s.pool.Name, info.Name)
-	// Extract container
-	for _, snap := range info.Snapshots {
-		// Extract snapshots
-		cur := fmt.Sprintf("backup/snapshots/%s", snap)
-
-		// Prepare tar arguments
-		args := append(tarArgs, []string{
-			"-",
-			"--recursive-unlink",
-			"--strip-components=3",
-			"--xattrs-include=*",
-			"-C", containerMntPoint, cur,
-		}...)
-
-		// Unpack
-		data.Seek(0, 0)
-		err = shared.RunCommandWithFds(data, nil, "tar", args...)
-		if err != nil {
-			logger.Errorf("Failed to untar \"%s\" into \"%s\": %s", cur, containerMntPoint, err)
-			return errors.Wrap(err, "Unpack")
-		}
-
-		// create snapshot
-		err = s.doContainerSnapshotCreate(info.Project, fmt.Sprintf("%s/%s", info.Name, snap), info.Name)
-		if err != nil {
-			return errors.Wrap(err, "Create snapshot")
-		}
-	}
-
-	// Prepare tar arguments
-	args := append(tarArgs, []string{
-		"-",
-		"--strip-components=2",
-		"--xattrs-include=*",
-		"-C", containerMntPoint, "backup/container",
-	}...)
-
-	// Extract container
-	data.Seek(0, 0)
-	err = shared.RunCommandWithFds(data, nil, "tar", args...)
-	if err != nil {
-		logger.Errorf("Failed to untar \"backup/container\" into \"%s\": %s", containerMntPoint, err)
-		return errors.Wrap(err, "Extract")
-	}
-
-	return nil
-}
-
-func (s *storageZfs) ContainerBackupLoad(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	logger.Debugf("Loading ZFS storage volume for backup \"%s\" on storage pool \"%s\"", info.Name, s.pool.Name)
-
-	if info.OptimizedStorage {
-		return s.doContainerBackupLoadOptimized(info, data, tarArgs)
-	}
-
-	return s.doContainerBackupLoadVanilla(info, data, tarArgs)
-}
-
-// - create temporary directory ${LXD_DIR}/images/lxd_images_
-// - create new zfs volume images/<fingerprint>
-// - mount the zfs volume on ${LXD_DIR}/images/lxd_images_
-// - unpack the downloaded image in ${LXD_DIR}/images/lxd_images_
-// - mark new zfs volume images/<fingerprint> readonly
-// - remove mountpoint property from zfs volume images/<fingerprint>
-// - snapshot zfs volume images/<fingerprint>@readonly, from which read-write clones are made
-func (s *storageZfs) ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	logger.Debugf("Creating ZFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	// Common variables
-	poolName := s.getOnDiskPoolName()
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	fs := fmt.Sprintf("images/%s", fingerprint)
-
-	// Revert flags
-	revertDB := true
-	revertMountpoint := true
-	revertDataset := true
-
-	// Deal with bad/partial unpacks
-	if zfsFilesystemEntityExists(poolName, fs) {
-		zfsPoolVolumeDestroy(poolName, fmt.Sprintf("%s@readonly", fs))
-		zfsPoolVolumeDestroy(poolName, fs)
-		s.deleteImageDbPoolVolume(fingerprint)
-	}
-
-	// Create the image volume entry
-	err := s.createImageDbPoolVolume(fingerprint)
-	if err != nil {
-		return err
-	}
-
-	defer func() {
-		if !revertDB {
-			return
-		}
-
-		s.deleteImageDbPoolVolume(fingerprint)
-	}()
-
-	// Create mountpoint if missing
-	if !shared.PathExists(imageMntPoint) {
-		err := os.MkdirAll(imageMntPoint, 0700)
-		if err != nil {
-			return err
-		}
-
-		defer func() {
-			if !revertMountpoint {
-				return
-			}
-
-			os.RemoveAll(imageMntPoint)
-		}()
-	}
-
-	// Check for deleted images
-	if zfsFilesystemEntityExists(poolName, fmt.Sprintf("deleted/%s", fmt.Sprintf("%s@readonly", fs))) {
-		// Restore deleted image
-		err := zfsPoolVolumeRename(poolName, fmt.Sprintf("deleted/%s", fs), fs, true)
-		if err != nil {
-			return err
-		}
-
-		// In case this is an image from an older lxd instance, wipe the mountpoint.
-		err = zfsPoolVolumeSet(poolName, fs, "mountpoint", "none")
-		if err != nil {
-			return err
-		}
-
-		revertDB = false
-		revertMountpoint = false
-		return nil
-	}
-
-	// Create temporary mountpoint directory.
-	tmp := driver.GetImageMountPoint(s.pool.Name, "")
-	tmpImageDir, err := ioutil.TempDir(tmp, "")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpImageDir)
-
-	imagePath := shared.VarPath("images", fingerprint)
-
-	// Create a new dataset for the image
-	dataset := fmt.Sprintf("%s/%s", poolName, fs)
-	msg, err := zfsPoolVolumeCreate(dataset, "mountpoint=none")
-	if err != nil {
-		logger.Errorf("Failed to create ZFS dataset \"%s\" on storage pool \"%s\": %s", dataset, s.pool.Name, msg)
-		return err
-	}
-
-	defer func() {
-		if !revertDataset {
-			return
-		}
-
-		zfsPoolVolumeDestroy(poolName, fs)
-	}()
-
-	// Set a temporary mountpoint for the image.
-	err = zfsPoolVolumeSet(poolName, fs, "mountpoint", tmpImageDir)
-	if err != nil {
-		return err
-	}
-
-	// Make sure that the image actually got mounted.
-	if !shared.IsMountPoint(tmpImageDir) {
-		zfsMount(poolName, fs)
-	}
-
-	// Unpack the image into the temporary mountpoint.
-	err = driver.ImageUnpack(imagePath, tmpImageDir, "", false, s.s.OS.RunningInUserNS, nil)
-	if err != nil {
-		return err
-	}
-
-	// Mark the new storage volume for the image as readonly.
-	if err = zfsPoolVolumeSet(poolName, fs, "readonly", "on"); err != nil {
-		return err
-	}
-
-	// Remove the temporary mountpoint from the image storage volume.
-	if err = zfsPoolVolumeSet(poolName, fs, "mountpoint", "none"); err != nil {
-		return err
-	}
-
-	// Make sure that the image actually got unmounted.
-	if shared.IsMountPoint(tmpImageDir) {
-		zfsUmount(poolName, fs, tmpImageDir)
-	}
-
-	// Create a snapshot of that image on the storage pool which we clone for
-	// container creation.
-	err = zfsPoolVolumeSnapshotCreate(poolName, fs, "readonly")
-	if err != nil {
-		return err
-	}
-
-	revertDB = false
-	revertMountpoint = false
-	revertDataset = false
-
-	logger.Debugf("Created ZFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return nil
-}
-
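At the ZFS level, the image workflow described in the comment block above reduces to a handful of commands. A hedged sketch of the same sequence (pool name, fingerprint, and unpack path are placeholders):

    package main

    import (
    	"log"
    	"os/exec"
    )

    func main() {
    	steps := [][]string{
    		{"zfs", "create", "-o", "mountpoint=none", "tank/images/abc123"},
    		{"zfs", "set", "mountpoint=/tmp/unpack", "tank/images/abc123"},
    		// ... unpack the image tarball into /tmp/unpack here ...
    		{"zfs", "set", "readonly=on", "tank/images/abc123"},
    		{"zfs", "set", "mountpoint=none", "tank/images/abc123"},
    		{"zfs", "snapshot", "tank/images/abc123@readonly"},
    	}
    	for _, s := range steps {
    		if out, err := exec.Command(s[0], s[1:]...).CombinedOutput(); err != nil {
    			log.Fatalf("%v: %s", err, out)
    		}
    	}
    }

New containers are then `zfs clone`d from the @readonly snapshot, which is why ImageDelete below renames the filesystem into deleted/ instead of destroying it while clones still reference it.
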
-func (s *storageZfs) ImageDelete(fingerprint string) error {
-	logger.Debugf("Deleting ZFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	poolName := s.getOnDiskPoolName()
-	fs := fmt.Sprintf("images/%s", fingerprint)
-
-	if zfsFilesystemEntityExists(poolName, fs) {
-		removable, err := zfsPoolVolumeSnapshotRemovable(poolName, fs, "readonly")
-		if err != nil && zfsFilesystemEntityExists(poolName, fmt.Sprintf("%s@readonly", fs)) {
-			return err
-		}
-
-		if removable {
-			err := zfsPoolVolumeDestroy(poolName, fs)
-			if err != nil {
-				return err
-			}
-		} else {
-			if err := zfsPoolVolumeSet(poolName, fs, "mountpoint", "none"); err != nil {
-				return err
-			}
-
-			if err := zfsPoolVolumeRename(poolName, fs, fmt.Sprintf("deleted/%s", fs), true); err != nil {
-				return err
-			}
-		}
-	}
-
-	err := s.deleteImageDbPoolVolume(fingerprint)
-	if err != nil {
-		return err
-	}
-
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if shared.PathExists(imageMntPoint) {
-		err := os.RemoveAll(imageMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	if shared.PathExists(shared.VarPath(fs + ".zfs")) {
-		err := os.RemoveAll(shared.VarPath(fs + ".zfs"))
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Deleted ZFS storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) ImageMount(fingerprint string) (bool, error) {
-	return true, nil
-}
-
-func (s *storageZfs) ImageUmount(fingerprint string) (bool, error) {
-	return true, nil
-}
-
-func (s *storageZfs) MigrationType() migration.MigrationFSType {
-	return migration.MigrationFSType_ZFS
-}
-
-func (s *storageZfs) PreservesInodes() bool {
-	return true
-}
-
-func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	/* If the container is a snapshot, let's just send that; we don't need
-	* to send anything else, because that's all the user asked for.
-	 */
-	if args.Instance.IsSnapshot() {
-		return &zfsMigrationSourceDriver{instance: args.Instance, zfs: s, zfsFeatures: args.ZfsFeatures}, nil
-	}
-
-	driver := zfsMigrationSourceDriver{
-		instance:         args.Instance,
-		snapshots:        []instance.Instance{},
-		zfsSnapshotNames: []string{},
-		zfs:              s,
-		zfsFeatures:      args.ZfsFeatures,
-	}
-
-	if args.InstanceOnly {
-		return &driver, nil
-	}
-
-	/* List all the snapshots in order of reverse creation. The idea here
-	* is that we send the oldest to newest snapshot, hopefully saving on
-	* xfer costs. Then, after all that, we send the container itself.
-	 */
-	snapshots, err := zfsPoolListSnapshots(s.getOnDiskPoolName(), fmt.Sprintf("containers/%s", project.Prefix(args.Instance.Project(), args.Instance.Name())))
-	if err != nil {
-		return nil, err
-	}
-
-	for _, snap := range snapshots {
-		/* In the case of e.g. multiple copies running at the same
-		* time, we will have potentially multiple migration-send
-		* snapshots. (Or in the case of the test suite, sometimes one
-		* will take too long to delete.)
-		 */
-		if !strings.HasPrefix(snap, "snapshot-") {
-			continue
-		}
-
-		lxdName := fmt.Sprintf("%s%s%s", args.Instance.Name(), shared.SnapshotDelimiter, snap[len("snapshot-"):])
-		snapshot, err := instance.LoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
-		if err != nil {
-			return nil, err
-		}
-
-		driver.snapshots = append(driver.snapshots, snapshot)
-		driver.zfsSnapshotNames = append(driver.zfsSnapshotNames, snap)
-	}
-
-	return &driver, nil
-}
-
-func (s *storageZfs) MigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	poolName := s.getOnDiskPoolName()
-	zfsName := fmt.Sprintf("containers/%s", project.Prefix(args.Instance.Project(), args.Instance.Name()))
-	zfsRecv := func(zfsName string, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
-		zfsFsName := fmt.Sprintf("%s/%s", poolName, zfsName)
-		args := []string{"receive", "-F", "-u", zfsFsName}
-		cmd := exec.Command("zfs", args...)
-
-		stdin, err := cmd.StdinPipe()
-		if err != nil {
-			return err
-		}
-
-		stderr, err := cmd.StderrPipe()
-		if err != nil {
-			return err
-		}
-
-		if err := cmd.Start(); err != nil {
-			return err
-		}
-
-		writePipe := io.WriteCloser(stdin)
-		if writeWrapper != nil {
-			writePipe = writeWrapper(stdin)
-		}
-
-		<-shared.WebsocketRecvStream(writePipe, conn)
-
-		output, err := ioutil.ReadAll(stderr)
-		if err != nil {
-			logger.Debugf("Problem reading zfs recv stderr: %s", err)
-		}
-
-		err = cmd.Wait()
-		if err != nil {
-			logger.Errorf("Problem with zfs recv: %s", string(output))
-			return err
-		}
-
-		if !strings.Contains(zfsName, "@") {
-			err = zfsPoolVolumeSet(poolName, zfsName, "canmount", "noauto")
-			if err != nil {
-				return err
-			}
-
-			err = zfsPoolVolumeSet(poolName, zfsName, "mountpoint", "none")
-			if err != nil {
-				return err
-			}
-		}
-
-		return nil
-	}
-
-	// Destroy the pre-existing (empty) dataset; this avoids issues with encryption.
-	err := zfsPoolVolumeDestroy(poolName, zfsName)
-	if err != nil {
-		return err
-	}
-
-	if len(args.Snapshots) > 0 {
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(args.Instance.Project(), s.volume.Name))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Instance.Project(), args.Instance.Name()))
-		if !shared.PathExists(snapshotMntPointSymlink) {
-			err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// At this point we have already figured out the parent
-	// container's root disk device so we can simply
-	// retrieve it from the expanded devices.
-	parentStoragePool := ""
-	parentExpandedDevices := args.Instance.ExpandedDevices()
-	parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative())
-	if parentLocalRootDiskDeviceKey != "" {
-		parentStoragePool = parentLocalRootDiskDevice["pool"]
-	}
-
-	// A little neuroticism.
-	if parentStoragePool == "" {
-		return fmt.Errorf("detected that the container's root device is missing the pool property during ZFS migration")
-	}
-
-	for _, snap := range args.Snapshots {
-		ctArgs := snapshotProtobufToInstanceArgs(args.Instance.Project(), args.Instance.Name(), snap)
-
-		// Ensure that snapshot and parent container have the
-		// same storage pool in their local root disk device.
-		// If the root disk device for the snapshot comes from a
-		// profile on the new instance as well we don't need to
-		// do anything.
-		if ctArgs.Devices != nil {
-			snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(ctArgs.Devices.CloneNative())
-			if snapLocalRootDiskDeviceKey != "" {
-				ctArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
-			}
-		}
-		_, err := containerCreateEmptySnapshot(args.Instance.DaemonState(), ctArgs)
-		if err != nil {
-			return err
-		}
-
-		wrapper := migration.ProgressWriter(op, "fs_progress", snap.GetName())
-		name := fmt.Sprintf("containers/%s@snapshot-%s", project.Prefix(args.Instance.Project(), args.Instance.Name()), snap.GetName())
-		if err := zfsRecv(name, wrapper); err != nil {
-			return err
-		}
-
-		snapshotMntPoint := driver.GetSnapshotMountPoint(args.Instance.Project(), poolName, fmt.Sprintf("%s/%s", args.Instance.Name(), *snap.Name))
-		if !shared.PathExists(snapshotMntPoint) {
-			err := os.MkdirAll(snapshotMntPoint, 0100)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	defer func() {
-		/* clean up our migration-send snapshots that we got from recv. */
-		zfsSnapshots, err := zfsPoolListSnapshots(poolName, fmt.Sprintf("containers/%s", project.Prefix(args.Instance.Project(), args.Instance.Name())))
-		if err != nil {
-			logger.Errorf("Failed listing snapshots post migration: %s", err)
-			return
-		}
-
-		for _, snap := range zfsSnapshots {
-			// If we received a bunch of snapshots, remove the migration-send-* ones, if not, wipe any snapshot we got
-			if args.Snapshots != nil && len(args.Snapshots) > 0 && !strings.HasPrefix(snap, "migration-send") {
-				continue
-			}
-
-			zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("containers/%s", project.Prefix(args.Instance.Project(), args.Instance.Name())), snap)
-		}
-	}()
-
-	/* finally, do the real container */
-	wrapper := migration.ProgressWriter(op, "fs_progress", args.Instance.Name())
-	if err := zfsRecv(zfsName, wrapper); err != nil {
-		return err
-	}
-
-	if args.Live {
-		/* and again for the post-running snapshot if this was a live migration */
-		wrapper := migration.ProgressWriter(op, "fs_progress", args.Instance.Name())
-		if err := zfsRecv(zfsName, wrapper); err != nil {
-			return err
-		}
-	}
-
-	/* Sometimes, zfs recv mounts this anyway, even if we pass -u
-	 * (https://forums.freebsd.org/threads/zfs-receive-u-shouldnt-mount-received-filesystem-right.36844/)
-	 * but sometimes it doesn't. Let's try to mount, but not complain about
-	 * failure.
-	 */
-	zfsMount(poolName, zfsName)
-	return nil
-}
-
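MigrationSink's zfsRecv helper feeds the migration websocket into the receiver's stdin, optionally through a progress-tracking write wrapper. Stripped of the LXD helpers, the plumbing looks roughly like this (gorilla/websocket is the library already used above; the dataset name is illustrative):

    import (
    	"io"
    	"os/exec"

    	"github.com/gorilla/websocket"
    )

    // recvFromWebsocket streams binary websocket messages into
    // `zfs receive` until the socket closes.
    func recvFromWebsocket(conn *websocket.Conn, dataset string) error {
    	cmd := exec.Command("zfs", "receive", "-F", "-u", dataset)
    	stdin, err := cmd.StdinPipe()
    	if err != nil {
    		return err
    	}
    	if err := cmd.Start(); err != nil {
    		return err
    	}

    	for {
    		mt, r, err := conn.NextReader()
    		if err != nil {
    			break // socket closed: end of stream
    		}
    		if mt == websocket.BinaryMessage {
    			if _, err := io.Copy(stdin, r); err != nil {
    				break
    			}
    		}
    	}
    	stdin.Close() // EOF tells zfs receive the stream is complete

    	return cmd.Wait()
    }
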
-func (s *storageZfs) StorageEntitySetQuota(volumeType int, size int64, data interface{}) error {
-	logger.Debugf(`Setting ZFS quota for "%s"`, s.volume.Name)
-
-	if !shared.IntInSlice(volumeType, supportedVolumeTypes) {
-		return fmt.Errorf("Invalid storage type")
-	}
-
-	var c instance.Instance
-	var fs string
-	switch volumeType {
-	case storagePoolVolumeTypeContainer:
-		c = data.(instance.Instance)
-		fs = fmt.Sprintf("containers/%s", project.Prefix(c.Project(), c.Name()))
-	case storagePoolVolumeTypeCustom:
-		fs = fmt.Sprintf("custom/%s", s.volume.Name)
-	}
-
-	property := "quota"
-
-	if s.pool.Config["volume.zfs.use_refquota"] != "" {
-		zfsUseRefquota = s.pool.Config["volume.zfs.use_refquota"]
-	}
-	if s.volume.Config["zfs.use_refquota"] != "" {
-		zfsUseRefquota = s.volume.Config["zfs.use_refquota"]
-	}
-
-	if shared.IsTrue(zfsUseRefquota) {
-		property = "refquota"
-	}
-
-	poolName := s.getOnDiskPoolName()
-	var err error
-	if size > 0 {
-		err = zfsPoolVolumeSet(poolName, fs, property, fmt.Sprintf("%d", size))
-	} else {
-		err = zfsPoolVolumeSet(poolName, fs, property, "none")
-	}
-
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf(`Set ZFS quota for "%s"`, s.volume.Name)
-	return nil
-}
-
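The quota/refquota distinction matters here: `quota` counts snapshots and descendants against the limit, while `refquota` limits only the space the dataset itself references, which is why it pairs with the statfs shortcut in ContainerGetUsage. A minimal sketch of applying either property (dataset name illustrative):

    import (
    	"os/exec"
    	"strconv"
    )

    // setLimit applies or clears a size limit on a dataset.
    func setLimit(dataset string, size int64, useRefquota bool) error {
    	prop := "quota"
    	if useRefquota {
    		prop = "refquota"
    	}
    	value := "none" // zfs interprets "none" as removing the limit
    	if size > 0 {
    		value = strconv.FormatInt(size, 10)
    	}
    	return exec.Command("zfs", "set", prop+"="+value, dataset).Run()
    }
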
-func (s *storageZfs) StoragePoolResources() (*api.ResourcesStoragePool, error) {
-	poolName := s.getOnDiskPoolName()
-
-	totalBuf, err := zfsFilesystemEntityPropertyGet(poolName, "", "available")
-	if err != nil {
-		return nil, err
-	}
-
-	totalStr := string(totalBuf)
-	totalStr = strings.TrimSpace(totalStr)
-	total, err := strconv.ParseUint(totalStr, 10, 64)
-	if err != nil {
-		return nil, err
-	}
-
-	usedBuf, err := zfsFilesystemEntityPropertyGet(poolName, "", "used")
-	if err != nil {
-		return nil, err
-	}
-
-	usedStr := string(usedBuf)
-	usedStr = strings.TrimSpace(usedStr)
-	used, err := strconv.ParseUint(usedStr, 10, 64)
-	if err != nil {
-		return nil, err
-	}
-
-	res := api.ResourcesStoragePool{}
-	res.Space.Total = total
-	res.Space.Used = used
-
-	// Inode allocation is dynamic so no use in reporting them.
-
-	return &res, nil
-}
-
-func (s *storageZfs) doCrossPoolStorageVolumeCopy(source *api.StorageVolumeSource) error {
-	successMsg := fmt.Sprintf("Copied ZFS storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-	// setup storage for the source volume
-	srcStorage, err := storagePoolVolumeInit(s.s, "default", source.Pool, source.Name, storagePoolVolumeTypeCustom)
-	if err != nil {
-		logger.Errorf("Failed to initialize ZFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	ourMount, err := srcStorage.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer srcStorage.StoragePoolUmount()
-	}
-
-	// Create the main volume
-	err = s.StoragePoolVolumeCreate()
-	if err != nil {
-		logger.Errorf("Failed to create ZFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	ourMount, err = s.StoragePoolVolumeMount()
-	if err != nil {
-		logger.Errorf("Failed to mount ZFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-	if ourMount {
-		defer s.StoragePoolVolumeUmount()
-	}
-
-	dstMountPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-
-	snapshots, err := driver.VolumeSnapshotsGet(s.s, source.Pool, source.Name, storagePoolVolumeTypeCustom)
-	if err != nil {
-		return err
-	}
-
-	if !source.VolumeOnly {
-		for _, snap := range snapshots {
-			srcMountPoint := driver.GetStoragePoolVolumeSnapshotMountPoint(source.Pool, snap.Name)
-
-			_, err = rsync.LocalCopy(srcMountPoint, dstMountPoint, bwlimit, true)
-			if err != nil {
-				logger.Errorf("Failed to rsync into ZFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-				return err
-			}
-
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(source.Name)
-
-			s.StoragePoolVolumeSnapshotCreate(&api.StorageVolumeSnapshotsPost{Name: fmt.Sprintf("%s/%s", s.volume.Name, snapOnlyName)})
-		}
-	}
-
-	var srcMountPoint string
-
-	if strings.Contains(source.Name, "/") {
-		srcMountPoint = driver.GetStoragePoolVolumeSnapshotMountPoint(source.Pool, source.Name)
-	} else {
-		srcMountPoint = driver.GetStoragePoolVolumeMountPoint(source.Pool, source.Name)
-	}
-
-	_, err = rsync.LocalCopy(srcMountPoint, dstMountPoint, bwlimit, true)
-	if err != nil {
-		os.RemoveAll(dstMountPoint)
-		logger.Errorf("Failed to rsync into ZFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	logger.Infof(successMsg)
-	return nil
-}
-
-func (s *storageZfs) copyVolumeWithoutSnapshotsFull(source *api.StorageVolumeSource) error {
-	sourceIsSnapshot := shared.IsSnapshot(source.Name)
-
-	var snapshotSuffix string
-	var sourceDataset string
-	var targetDataset string
-	var targetSnapshotDataset string
-
-	poolName := s.getOnDiskPoolName()
-
-	if sourceIsSnapshot {
-		sourceVolumeName, sourceSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(source.Name)
-		snapshotSuffix = fmt.Sprintf("snapshot-%s", sourceSnapOnlyName)
-		sourceDataset = fmt.Sprintf("%s/custom/%s@%s", source.Pool, sourceVolumeName, snapshotSuffix)
-		targetSnapshotDataset = fmt.Sprintf("%s/custom/%s@snapshot-%s", poolName, s.volume.Name, sourceSnapOnlyName)
-	} else {
-		snapshotSuffix = uuid.NewRandom().String()
-		sourceDataset = fmt.Sprintf("%s/custom/%s@%s", poolName, source.Name, snapshotSuffix)
-		targetSnapshotDataset = fmt.Sprintf("%s/custom/%s@%s", poolName, s.volume.Name, snapshotSuffix)
-
-		fs := fmt.Sprintf("custom/%s", source.Name)
-		err := zfsPoolVolumeSnapshotCreate(poolName, fs, snapshotSuffix)
-		if err != nil {
-			return err
-		}
-		defer func() {
-			err := zfsPoolVolumeSnapshotDestroy(poolName, fs, snapshotSuffix)
-			if err != nil {
-				logger.Warnf("Failed to delete temporary ZFS snapshot \"%s\", manual cleanup needed", sourceDataset)
-			}
-		}()
-	}
-
-	zfsSendCmd := exec.Command("zfs", "send", sourceDataset)
-
-	targetDataset = fmt.Sprintf("%s/custom/%s", poolName, s.volume.Name)
-	zfsRecvCmd := exec.Command("zfs", "receive", targetDataset)
-
-	zfsRecvCmd.Stdin, _ = zfsSendCmd.StdoutPipe()
-	zfsRecvCmd.Stdout = os.Stdout
-	zfsRecvCmd.Stderr = os.Stderr
-
-	err := zfsRecvCmd.Start()
-	if err != nil {
-		return err
-	}
-
-	err = zfsSendCmd.Run()
-	if err != nil {
-		return err
-	}
-
-	err = zfsRecvCmd.Wait()
-	if err != nil {
-		return err
-	}
-
-	msg, err := shared.RunCommand("zfs", "rollback", "-r", "-R", targetSnapshotDataset)
-	if err != nil {
-		logger.Errorf("Failed to rollback ZFS dataset: %s", msg)
-		return err
-	}
-
-	targetContainerMountPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	targetfs := fmt.Sprintf("custom/%s", s.volume.Name)
-
-	err = zfsPoolVolumeSet(poolName, targetfs, "canmount", "noauto")
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSet(poolName, targetfs, "mountpoint", targetContainerMountPoint)
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSnapshotDestroy(poolName, targetfs, snapshotSuffix)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
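
The full-copy path above streams the dataset by piping "zfs send" straight into "zfs receive" through a pair of exec.Cmd processes. A minimal standalone sketch of that pattern, with the StdoutPipe error checked rather than discarded; the dataset names in main are placeholders, not values from the patch:

    // sendReceive pipes "zfs send <sourceSnapshot>" into
    // "zfs receive <targetDataset>". A sketch of the pattern used in
    // copyVolumeWithoutSnapshotsFull, not a drop-in replacement.
    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    func sendReceive(sourceSnapshot string, targetDataset string) error {
        sendCmd := exec.Command("zfs", "send", sourceSnapshot)
        recvCmd := exec.Command("zfs", "receive", targetDataset)

        // Connect the sender's stdout to the receiver's stdin.
        pipe, err := sendCmd.StdoutPipe()
        if err != nil {
            return err
        }
        recvCmd.Stdin = pipe
        recvCmd.Stdout = os.Stdout
        recvCmd.Stderr = os.Stderr

        // Start the receiver first so it is ready to consume the stream.
        err = recvCmd.Start()
        if err != nil {
            return err
        }

        // Run the sender to completion, then wait for the receiver to
        // finish applying the stream.
        err = sendCmd.Run()
        if err != nil {
            return fmt.Errorf("zfs send failed: %v", err)
        }

        return recvCmd.Wait()
    }

    func main() {
        // Hypothetical dataset names, for illustration only.
        err := sendReceive("pool/custom/vol@snapshot-a", "pool/custom/copy")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }

Starting the receiver before running the sender matters here: with this sequential structure, if the receive process were not already draining the pipe, a stream larger than the pipe buffer would block "zfs send" forever.
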
-func (s *storageZfs) copyVolumeWithoutSnapshotsSparse(source *api.StorageVolumeSource) error {
-	poolName := s.getOnDiskPoolName()
-
-	sourceVolumeName := source.Name
-	sourceVolumePath := driver.GetStoragePoolVolumeMountPoint(source.Pool, source.Name)
-
-	targetVolumeName := s.volume.Name
-	targetVolumePath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-	sourceZfsDataset := ""
-	sourceZfsDatasetSnapshot := ""
-	sourceName, sourceSnapOnlyName, isSnapshotName := shared.InstanceGetParentAndSnapshotName(sourceVolumeName)
-
-	targetZfsDataset := fmt.Sprintf("custom/%s", targetVolumeName)
-
-	if isSnapshotName {
-		sourceZfsDatasetSnapshot = sourceSnapOnlyName
-	}
-
-	revert := true
-	if sourceZfsDatasetSnapshot == "" {
-		if zfsFilesystemEntityExists(poolName, fmt.Sprintf("custom/%s", sourceName)) {
-			sourceZfsDatasetSnapshot = fmt.Sprintf("copy-%s", uuid.NewRandom().String())
-			sourceZfsDataset = fmt.Sprintf("custom/%s", sourceName)
-
-			err := zfsPoolVolumeSnapshotCreate(poolName, sourceZfsDataset, sourceZfsDatasetSnapshot)
-			if err != nil {
-				return err
-			}
-
-			defer func() {
-				if !revert {
-					return
-				}
-				zfsPoolVolumeSnapshotDestroy(poolName, sourceZfsDataset, sourceZfsDatasetSnapshot)
-			}()
-		}
-	} else {
-		if zfsFilesystemEntityExists(poolName, fmt.Sprintf("custom/%s@snapshot-%s", sourceName, sourceZfsDatasetSnapshot)) {
-			sourceZfsDataset = fmt.Sprintf("custom/%s", sourceName)
-			sourceZfsDatasetSnapshot = fmt.Sprintf("snapshot-%s", sourceZfsDatasetSnapshot)
-		}
-	}
-
-	if sourceZfsDataset != "" {
-		err := zfsPoolVolumeClone("default", poolName, sourceZfsDataset, sourceZfsDatasetSnapshot, targetZfsDataset, targetVolumePath)
-		if err != nil {
-			return err
-		}
-
-		defer func() {
-			if !revert {
-				return
-			}
-			zfsPoolVolumeDestroy(poolName, targetZfsDataset)
-		}()
-	} else {
-		bwlimit := s.pool.Config["rsync.bwlimit"]
-
-		output, err := rsync.LocalCopy(sourceVolumePath, targetVolumePath, bwlimit, true)
-		if err != nil {
-			return fmt.Errorf("rsync failed: %s", string(output))
-		}
-	}
-
-	revert = false
-
-	return nil
-}
-
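
The sparse-copy function above uses a cleanup idiom that recurs throughout this file: a revert flag armed at the top and disarmed just before the successful return, so the deferred destroy calls fire only on error paths. A minimal sketch of the idiom; create, destroy, and steps are stand-ins, not helpers from this codebase:

    // runWithRevert arms a deferred cleanup that only runs if the
    // function exits before reaching the success point.
    func runWithRevert(create func() error, destroy func() error, steps ...func() error) error {
        revert := true

        err := create()
        if err != nil {
            return err
        }
        defer func() {
            if !revert {
                return // Success: keep what was created.
            }
            destroy() // Error path: undo the earlier step.
        }()

        // Any step failing leaves revert == true, so the deferred
        // cleanup runs on the way out.
        for _, step := range steps {
            err := step()
            if err != nil {
                return err
            }
        }

        revert = false // Disarm the cleanup; everything succeeded.
        return nil
    }
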
-func (s *storageZfs) StoragePoolVolumeCopy(source *api.StorageVolumeSource) error {
-	logger.Infof("Copying ZFS storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-	successMsg := fmt.Sprintf("Copied ZFS storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-
-	if source.Pool != s.pool.Name {
-		return s.doCrossPoolStorageVolumeCopy(source)
-	}
-
-	var snapshots []string
-
-	poolName := s.getOnDiskPoolName()
-
-	if !shared.IsSnapshot(source.Name) {
-		allSnapshots, err := zfsPoolListSnapshots(poolName, fmt.Sprintf("custom/%s", source.Name))
-		if err != nil {
-			return err
-		}
-
-		for _, snap := range allSnapshots {
-			if strings.HasPrefix(snap, "snapshot-") {
-				snapshots = append(snapshots, strings.TrimPrefix(snap, "snapshot-"))
-			}
-		}
-	}
-
-	targetStorageVolumeMountPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	fs := fmt.Sprintf("custom/%s", s.volume.Name)
-
-	if source.VolumeOnly || len(snapshots) == 0 {
-		var err error
-
-		if s.pool.Config["zfs.clone_copy"] != "" && !shared.IsTrue(s.pool.Config["zfs.clone_copy"]) {
-			err = s.copyVolumeWithoutSnapshotsFull(source)
-		} else {
-			err = s.copyVolumeWithoutSnapshotsSparse(source)
-		}
-		if err != nil {
-			return err
-		}
-	} else {
-		targetVolumeMountPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-		err := os.MkdirAll(targetVolumeMountPoint, 0711)
-		if err != nil {
-			return err
-		}
-
-		prev := ""
-		prevSnapOnlyName := ""
-
-		for i, snap := range snapshots {
-			if i > 0 {
-				prev = snapshots[i-1]
-			}
-
-			sourceDataset := fmt.Sprintf("%s/custom/%s@snapshot-%s", poolName, source.Name, snap)
-			targetDataset := fmt.Sprintf("%s/custom/%s@snapshot-%s", poolName, s.volume.Name, snap)
-
-			snapshotMntPoint := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, fmt.Sprintf("%s/%s", s.volume.Name, snap))
-
-			err := os.MkdirAll(snapshotMntPoint, 0700)
-			if err != nil {
-				return err
-			}
-
-			prevSnapOnlyName = snap
-
-			args := []string{"send", sourceDataset}
-
-			if prev != "" {
-				parentDataset := fmt.Sprintf("%s/custom/%s@snapshot-%s", poolName, source.Name, prev)
-				args = append(args, "-i", parentDataset)
-			}
-
-			zfsSendCmd := exec.Command("zfs", args...)
-			zfsRecvCmd := exec.Command("zfs", "receive", "-F", targetDataset)
-
-			zfsRecvCmd.Stdin, _ = zfsSendCmd.StdoutPipe()
-			zfsRecvCmd.Stdout = os.Stdout
-			zfsRecvCmd.Stderr = os.Stderr
-
-			err = zfsRecvCmd.Start()
-			if err != nil {
-				return err
-			}
-
-			err = zfsSendCmd.Run()
-			if err != nil {
-				return err
-			}
-
-			err = zfsRecvCmd.Wait()
-			if err != nil {
-				return err
-			}
-		}
-
-		tmpSnapshotName := fmt.Sprintf("copy-send-%s", uuid.NewRandom().String())
-
-		err = zfsPoolVolumeSnapshotCreate(poolName, fmt.Sprintf("custom/%s", source.Name), tmpSnapshotName)
-		if err != nil {
-			return err
-		}
-
-		defer zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("custom/%s", source.Name), tmpSnapshotName)
-
-		currentSnapshotDataset := fmt.Sprintf("%s/custom/%s@%s", poolName, source.Name, tmpSnapshotName)
-
-		args := []string{"send", currentSnapshotDataset}
-
-		if prevSnapOnlyName != "" {
-			args = append(args, "-i", fmt.Sprintf("%s/custom/%s@snapshot-%s", poolName, source.Name, prevSnapOnlyName))
-		}
-
-		zfsSendCmd := exec.Command("zfs", args...)
-		targetDataset := fmt.Sprintf("%s/custom/%s", poolName, s.volume.Name)
-		zfsRecvCmd := exec.Command("zfs", "receive", "-F", targetDataset)
-
-		zfsRecvCmd.Stdin, _ = zfsSendCmd.StdoutPipe()
-		zfsRecvCmd.Stdout = os.Stdout
-		zfsRecvCmd.Stderr = os.Stderr
-
-		err = zfsRecvCmd.Start()
-		if err != nil {
-			return err
-		}
-
-		err = zfsSendCmd.Run()
-		if err != nil {
-			return err
-		}
-
-		err = zfsRecvCmd.Wait()
-		if err != nil {
-			return err
-		}
-
-		defer zfsPoolVolumeSnapshotDestroy(poolName, fmt.Sprintf("custom/%s", s.volume.Name), tmpSnapshotName)
-
-		err = zfsPoolVolumeSet(poolName, fs, "canmount", "noauto")
-		if err != nil {
-			return err
-		}
-
-		err = zfsPoolVolumeSet(poolName, fs, "mountpoint", targetStorageVolumeMountPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	if !shared.IsMountPoint(targetStorageVolumeMountPoint) {
-		err := zfsMount(poolName, fs)
-		if err != nil {
-			return err
-		}
-		defer zfsUmount(poolName, fs, targetStorageVolumeMountPoint)
-	}
-
-	// apply quota
-	if s.volume.Config["size"] != "" {
-		size, err := units.ParseByteSizeString(s.volume.Config["size"])
-		if err != nil {
-			return err
-		}
-
-		err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Infof(successMsg)
-	return nil
-}
-
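
The snapshot-preserving branch of StoragePoolVolumeCopy above replays the chain with incremental streams: the first snapshot is sent whole and each later one is sent with -i <previous snapshot>. A small sketch of how those "zfs send" argument lists are assembled, following the pool/custom/<volume>@snapshot-<name> naming used above; the helper name is hypothetical and fmt is assumed imported:

    // incrementalSendArgs returns the "zfs send" argument lists for
    // replaying a snapshot chain: a full stream for the first snapshot,
    // then incremental streams (-i previous) for the rest.
    func incrementalSendArgs(poolName string, volume string, snaps []string) [][]string {
        argSets := [][]string{}
        prev := ""

        for _, snap := range snaps {
            args := []string{"send", fmt.Sprintf("%s/custom/%s@snapshot-%s", poolName, volume, snap)}
            if prev != "" {
                args = append(args, "-i", fmt.Sprintf("%s/custom/%s@snapshot-%s", poolName, volume, prev))
            }

            argSets = append(argSets, args)
            prev = snap
        }

        return argSets
    }
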
-func (s *storageZfs) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return rsyncStorageMigrationSource(args)
-}
-
-func (s *storageZfs) StorageMigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	return rsyncStorageMigrationSink(conn, op, args)
-}
-
-func (s *storageZfs) StoragePoolVolumeSnapshotCreate(target *api.StorageVolumeSnapshotsPost) error {
-	logger.Infof("Creating ZFS storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	sourceOnlyName, snapshotOnlyName, ok := shared.InstanceGetParentAndSnapshotName(target.Name)
-	if !ok {
-		return fmt.Errorf("Not a snapshot name")
-	}
-
-	sourceDataset := fmt.Sprintf("custom/%s", sourceOnlyName)
-	poolName := s.getOnDiskPoolName()
-	snapName := fmt.Sprintf("snapshot-%s", snapshotOnlyName)
-	err := zfsPoolVolumeSnapshotCreate(poolName, sourceDataset, snapName)
-	if err != nil {
-		return err
-	}
-
-	snapshotMntPoint := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, target.Name)
-	if !shared.PathExists(snapshotMntPoint) {
-		err := os.MkdirAll(snapshotMntPoint, 0700)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Infof("Created ZFS storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) StoragePoolVolumeSnapshotDelete() error {
-	logger.Infof("Deleting ZFS storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	sourceName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	snapshotName := fmt.Sprintf("snapshot-%s", snapshotOnlyName)
-
-	onDiskPoolName := s.getOnDiskPoolName()
-	if zfsFilesystemEntityExists(onDiskPoolName, fmt.Sprintf("custom/%s@%s", sourceName, snapshotName)) {
-		removable, err := zfsPoolVolumeSnapshotRemovable(onDiskPoolName, fmt.Sprintf("custom/%s", sourceName), snapshotName)
-		if err != nil {
-			return err
-		}
-
-		if removable {
-			err = zfsPoolVolumeSnapshotDestroy(onDiskPoolName, fmt.Sprintf("custom/%s", sourceName), snapshotName)
-		} else {
-			err = zfsPoolVolumeSnapshotRename(onDiskPoolName, fmt.Sprintf("custom/%s", sourceName), snapshotName, fmt.Sprintf("copy-%s", uuid.NewRandom().String()))
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	storageVolumePath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	err := os.RemoveAll(storageVolumePath)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	storageVolumeSnapshotPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, sourceName)
-	empty, err := shared.PathIsEmpty(storageVolumeSnapshotPath)
-	if err == nil && empty {
-		os.RemoveAll(storageVolumeSnapshotPath)
-	}
-
-	err = s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		s.volume.Name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for ZFS storage volume "%s" on storage pool "%s"`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	logger.Infof("Deleted ZFS storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) StoragePoolVolumeSnapshotRename(newName string) error {
-	sourceName, snapshotOnlyName, ok := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	fullSnapshotName := fmt.Sprintf("%s%s%s", sourceName, shared.SnapshotDelimiter, newName)
-
-	logger.Infof("Renaming ZFS storage volume snapshot on storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, fullSnapshotName)
-
-	if !ok {
-		return fmt.Errorf("Not a snapshot name")
-	}
-
-	oldZfsDatasetName := fmt.Sprintf("snapshot-%s", snapshotOnlyName)
-	newZfsDatasetName := fmt.Sprintf("snapshot-%s", newName)
-	err := zfsPoolVolumeSnapshotRename(s.getOnDiskPoolName(), fmt.Sprintf("custom/%s", sourceName), oldZfsDatasetName, newZfsDatasetName)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Renamed ZFS storage volume snapshot on storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, fullSnapshotName)
-
-	return s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, fmt.Sprintf("%s/%s", sourceName, newName), storagePoolVolumeTypeCustom, s.poolID)
-}
diff --git a/lxd/storage_zfs_utils.go b/lxd/storage_zfs_utils.go
deleted file mode 100644
index 7cefa01bab..0000000000
--- a/lxd/storage_zfs_utils.go
+++ /dev/null
@@ -1,839 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strings"
-	"time"
-
-	"github.com/pborman/uuid"
-	"github.com/pkg/errors"
-	"golang.org/x/sys/unix"
-
-	"github.com/lxc/lxd/lxd/project"
-	driver "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/logger"
-)
-
-// zfsIsEnabled returns whether zfs backend is supported.
-func zfsIsEnabled() bool {
-	out, err := exec.LookPath("zfs")
-	if err != nil || len(out) == 0 {
-		return false
-	}
-
-	return true
-}
-
-// zfsToolVersionGet returns the ZFS tools version
-func zfsToolVersionGet() (string, error) {
-	// This function is only really relevant on Ubuntu, as it is the only
-	// distro that ships out-of-sync tools and kernel modules
-	out, err := shared.RunCommand("dpkg-query", "--showformat=${Version}", "--show", "zfsutils-linux")
-	if err != nil {
-		return "", err
-	}
-
-	return strings.TrimSpace(string(out)), nil
-}
-
-// zfsModuleVersionGet returns the ZFS module version
-func zfsModuleVersionGet() (string, error) {
-	var zfsVersion string
-
-	if shared.PathExists("/sys/module/zfs/version") {
-		out, err := ioutil.ReadFile("/sys/module/zfs/version")
-		if err != nil {
-			return "", fmt.Errorf("Could not determine ZFS module version")
-		}
-
-		zfsVersion = string(out)
-	} else {
-		out, err := shared.RunCommand("modinfo", "-F", "version", "zfs")
-		if err != nil {
-			return "", fmt.Errorf("Could not determine ZFS module version")
-		}
-
-		zfsVersion = out
-	}
-
-	return strings.TrimSpace(zfsVersion), nil
-}
-
-// zfsPoolVolumeCreate creates a ZFS dataset with a set of given properties.
-func zfsPoolVolumeCreate(dataset string, properties ...string) (string, error) {
-	cmd := []string{"zfs", "create"}
-
-	for _, prop := range properties {
-		cmd = append(cmd, []string{"-o", prop}...)
-	}
-
-	cmd = append(cmd, []string{"-p", dataset}...)
-
-	return shared.RunCommand(cmd[0], cmd[1:]...)
-}
-
-func zfsPoolCheck(pool string) error {
-	output, err := shared.RunCommand(
-		"zfs", "get", "-H", "-o", "value", "type", pool)
-	if err != nil {
-		return err
-	}
-
-	poolType := strings.Split(output, "\n")[0]
-	if poolType != "filesystem" {
-		return fmt.Errorf("Unsupported pool type: %s", poolType)
-	}
-
-	return nil
-}
-
-// zfsPoolVolumeExists verifies if a specific ZFS pool or volume exists.
-func zfsPoolVolumeExists(dataset string) (bool, error) {
-	output, err := shared.RunCommand(
-		"zfs", "list", "-Ho", "name")
-
-	if err != nil {
-		return false, err
-	}
-
-	for _, name := range strings.Split(output, "\n") {
-		if name == dataset {
-			return true, nil
-		}
-	}
-	return false, nil
-}
-
-func zfsPoolCreate(pool string, vdev string) error {
-	var err error
-
-	dataset := ""
-
-	if pool == "" {
-		_, err := shared.RunCommand(
-			"zfs", "create", "-p", "-o", "mountpoint=none", vdev)
-		if err != nil {
-			logger.Errorf("zfs create failed: %v", err)
-			return errors.Wrap(err, "Failed to create ZFS filesystem")
-		}
-		dataset = vdev
-	} else {
-		_, err = shared.RunCommand(
-			"zpool", "create", "-f", "-m", "none", "-O", "compression=on", pool, vdev)
-		if err != nil {
-			logger.Errorf("zfs create failed: %v", err)
-			return errors.Wrap(err, "Failed to create the ZFS pool")
-		}
-
-		dataset = pool
-	}
-
-	err = zfsPoolApplyDefaults(dataset)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func zfsPoolApplyDefaults(dataset string) error {
-	err := zfsPoolVolumeSet(dataset, "", "mountpoint", "none")
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSet(dataset, "", "setuid", "on")
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSet(dataset, "", "exec", "on")
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSet(dataset, "", "devices", "on")
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSet(dataset, "", "acltype", "posixacl")
-	if err != nil {
-		return err
-	}
-
-	err = zfsPoolVolumeSet(dataset, "", "xattr", "sa")
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func zfsPoolVolumeClone(project, pool string, source string, name string, dest string, mountpoint string) error {
-	_, err := shared.RunCommand(
-		"zfs",
-		"clone",
-		"-p",
-		"-o", fmt.Sprintf("mountpoint=%s", mountpoint),
-		"-o", "canmount=noauto",
-		fmt.Sprintf("%s/%s@%s", pool, source, name),
-		fmt.Sprintf("%s/%s", pool, dest))
-	if err != nil {
-		logger.Errorf("zfs clone failed: %v", err)
-		return errors.Wrap(err, "Failed to clone the filesystem")
-	}
-
-	subvols, err := zfsPoolListSubvolumes(pool, fmt.Sprintf("%s/%s", pool, source))
-	if err != nil {
-		return err
-	}
-
-	for _, sub := range subvols {
-		snaps, err := zfsPoolListSnapshots(pool, sub)
-		if err != nil {
-			return err
-		}
-
-		if !shared.StringInSlice(name, snaps) {
-			continue
-		}
-
-		destSubvol := dest + strings.TrimPrefix(sub, source)
-		snapshotMntPoint := driver.GetSnapshotMountPoint(project, pool, destSubvol)
-
-		_, err = shared.RunCommand(
-			"zfs",
-			"clone",
-			"-p",
-			"-o", fmt.Sprintf("mountpoint=%s", snapshotMntPoint),
-			"-o", "canmount=noauto",
-			fmt.Sprintf("%s/%s@%s", pool, sub, name),
-			fmt.Sprintf("%s/%s", pool, destSubvol))
-		if err != nil {
-			logger.Errorf("zfs clone failed: %v", err)
-			return errors.Wrap(err, "Failed to clone the sub-volume")
-		}
-	}
-
-	return nil
-}
-
-func zfsFilesystemEntityDelete(vdev string, pool string) error {
-	var err error
-	if strings.Contains(pool, "/") {
-		// Command to destroy a zfs dataset.
-		_, err = shared.RunCommand("zfs", "destroy", "-r", pool)
-	} else {
-		// Command to destroy a zfs pool.
-		_, err = shared.RunCommand("zpool", "destroy", "-f", pool)
-	}
-	if err != nil {
-		return errors.Wrap(err, "Failed to delete the ZFS pool")
-	}
-
-	// Cleanup storage
-	if filepath.IsAbs(vdev) && !shared.IsBlockdevPath(vdev) {
-		os.RemoveAll(vdev)
-	}
-
-	return nil
-}
-
-func zfsPoolVolumeDestroy(pool string, path string) error {
-	mountpoint, err := zfsFilesystemEntityPropertyGet(pool, path, "mountpoint")
-	if err != nil {
-		return err
-	}
-
-	if mountpoint != "none" && shared.IsMountPoint(mountpoint) {
-		// Make sure the filesystem isn't mounted anymore
-		err := unix.Unmount(mountpoint, 0)
-		if err != nil {
-			err := unix.Unmount(mountpoint, unix.MNT_DETACH)
-			if err != nil {
-				return err
-			}
-		}
-
-		// Give a chance for the kernel to notice (workaround for zfs slowness)
-		time.Sleep(1 * time.Second)
-	}
-
-	// Due to open fds or kernel refs, this may fail for a bit, give it 10s
-	_, err = shared.TryRunCommand(
-		"zfs",
-		"destroy",
-		"-r",
-		fmt.Sprintf("%s/%s", pool, path))
-
-	if err != nil {
-		return errors.Wrap(err, "Failed to destroy ZFS dataset")
-	}
-
-	return nil
-}
-
-func zfsPoolVolumeCleanup(pool string, path string) error {
-	if strings.HasPrefix(path, "deleted/") {
-		// Cleanup of filesystems kept for refcount reason
-		removablePath, err := zfsPoolVolumeSnapshotRemovable(pool, path, "")
-		if err != nil {
-			return err
-		}
-
-		// Confirm that there are no more clones
-		if removablePath {
-			if strings.Contains(path, "@") {
-				// Cleanup snapshots
-				err = zfsPoolVolumeDestroy(pool, path)
-				if err != nil {
-					return err
-				}
-
-				// Check if the parent can now be deleted
-				subPath := strings.SplitN(path, "@", 2)[0]
-				snaps, err := zfsPoolListSnapshots(pool, subPath)
-				if err != nil {
-					return err
-				}
-
-				if len(snaps) == 0 {
-					err := zfsPoolVolumeCleanup(pool, subPath)
-					if err != nil {
-						return err
-					}
-				}
-			} else {
-				// Cleanup filesystems
-				origin, err := zfsFilesystemEntityPropertyGet(pool, path, "origin")
-				if err != nil {
-					return err
-				}
-				origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", pool))
-
-				err = zfsPoolVolumeDestroy(pool, path)
-				if err != nil {
-					return err
-				}
-
-				// Attempt to remove its parent
-				if origin != "-" {
-					err := zfsPoolVolumeCleanup(pool, origin)
-					if err != nil {
-						return err
-					}
-				}
-			}
-
-			return nil
-		}
-	} else if strings.HasPrefix(path, "containers") && strings.Contains(path, "@copy-") {
-		// Just remove the copy- snapshot for copies of active containers
-		err := zfsPoolVolumeDestroy(pool, path)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func zfsFilesystemEntityPropertyGet(pool string, path string, key string) (string, error) {
-	entity := pool
-	if path != "" {
-		entity = fmt.Sprintf("%s/%s", pool, path)
-	}
-	output, err := shared.RunCommand(
-		"zfs",
-		"get",
-		"-H",
-		"-p",
-		"-o", "value",
-		key,
-		entity)
-	if err != nil {
-		return "", errors.Wrap(err, "Failed to get ZFS config")
-	}
-
-	return strings.TrimRight(output, "\n"), nil
-}
-
-func zfsPoolVolumeRename(pool string, source string, dest string, ignoreMounts bool) error {
-	var err error
-
-	for i := 0; i < 20; i++ {
-		if ignoreMounts {
-			_, err = shared.RunCommand(
-				"/proc/self/exe",
-				"forkzfs",
-				"--",
-				"rename",
-				"-p",
-				fmt.Sprintf("%s/%s", pool, source),
-				fmt.Sprintf("%s/%s", pool, dest))
-		} else {
-			_, err = shared.RunCommand(
-				"zfs",
-				"rename",
-				"-p",
-				fmt.Sprintf("%s/%s", pool, source),
-				fmt.Sprintf("%s/%s", pool, dest))
-		}
-
-		// Success
-		if err == nil {
-			return nil
-		}
-
-		// zfs rename can fail because of descendants, yet still manage to complete the rename
-		if !zfsFilesystemEntityExists(pool, source) && zfsFilesystemEntityExists(pool, dest) {
-			return nil
-		}
-
-		time.Sleep(500 * time.Millisecond)
-	}
-
-	// Timeout
-	logger.Errorf("zfs rename failed: %v", err)
-	return errors.Wrap(err, "Failed to rename ZFS filesystem")
-}
-
-func zfsPoolVolumeSet(pool string, path string, key string, value string) error {
-	vdev := pool
-	if path != "" {
-		vdev = fmt.Sprintf("%s/%s", pool, path)
-	}
-	_, err := shared.RunCommand(
-		"zfs",
-		"set",
-		fmt.Sprintf("%s=%s", key, value),
-		vdev)
-	if err != nil {
-		logger.Errorf("zfs set failed: %v", err)
-		return errors.Wrap(err, "Failed to set ZFS config")
-	}
-
-	return nil
-}
-
-func zfsPoolVolumeSnapshotCreate(pool string, path string, name string) error {
-	_, err := shared.RunCommand(
-		"zfs",
-		"snapshot",
-		"-r",
-		fmt.Sprintf("%s/%s@%s", pool, path, name))
-	if err != nil {
-		logger.Errorf("zfs snapshot failed: %v", err)
-		return errors.Wrap(err, "Failed to create ZFS snapshot")
-	}
-
-	return nil
-}
-
-func zfsPoolVolumeSnapshotDestroy(pool, path string, name string) error {
-	_, err := shared.RunCommand(
-		"zfs",
-		"destroy",
-		"-r",
-		fmt.Sprintf("%s/%s@%s", pool, path, name))
-	if err != nil {
-		logger.Errorf("zfs destroy failed: %v", err)
-		return errors.Wrap(err, "Failed to destroy ZFS snapshot")
-	}
-
-	return nil
-}
-
-func zfsPoolVolumeSnapshotRestore(pool string, path string, name string) error {
-	_, err := shared.TryRunCommand(
-		"zfs",
-		"rollback",
-		fmt.Sprintf("%s/%s@%s", pool, path, name))
-	if err != nil {
-		logger.Errorf("zfs rollback failed: %v", err)
-		return errors.Wrap(err, "Failed to restore ZFS snapshot")
-	}
-
-	subvols, err := zfsPoolListSubvolumes(pool, fmt.Sprintf("%s/%s", pool, path))
-	if err != nil {
-		return err
-	}
-
-	for _, sub := range subvols {
-		snaps, err := zfsPoolListSnapshots(pool, sub)
-		if err != nil {
-			return err
-		}
-
-		if !shared.StringInSlice(name, snaps) {
-			continue
-		}
-
-		_, err = shared.TryRunCommand(
-			"zfs",
-			"rollback",
-			fmt.Sprintf("%s/%s@%s", pool, sub, name))
-		if err != nil {
-			logger.Errorf("zfs rollback failed: %v", err)
-			return errors.Wrap(err, "Failed to restore ZFS sub-volume snapshot")
-		}
-	}
-
-	return nil
-}
-
-func zfsPoolVolumeSnapshotRename(pool string, path string, oldName string, newName string) error {
-	_, err := shared.RunCommand(
-		"zfs",
-		"rename",
-		"-r",
-		fmt.Sprintf("%s/%s@%s", pool, path, oldName),
-		fmt.Sprintf("%s/%s@%s", pool, path, newName))
-	if err != nil {
-		logger.Errorf("zfs snapshot rename failed: %v", err)
-		return errors.Wrap(err, "Failed to rename ZFS snapshot")
-	}
-
-	return nil
-}
-
-func zfsMount(poolName string, path string) error {
-	_, err := shared.TryRunCommand(
-		"zfs",
-		"mount",
-		fmt.Sprintf("%s/%s", poolName, path))
-	if err != nil {
-		return errors.Wrap(err, "Failed to mount ZFS filesystem")
-	}
-
-	return nil
-}
-
-func zfsUmount(poolName string, path string, mountpoint string) error {
-	output, err := shared.TryRunCommand(
-		"zfs",
-		"unmount",
-		fmt.Sprintf("%s/%s", poolName, path))
-	if err != nil {
-		logger.Warnf("Failed to unmount ZFS filesystem via zfs unmount: %s. Trying lazy umount (MNT_DETACH)...", output)
-		err := storageDrivers.TryUnmount(mountpoint, unix.MNT_DETACH)
-		if err != nil {
-			logger.Warnf("Failed to unmount ZFS filesystem via lazy umount (MNT_DETACH)...")
-			return err
-		}
-	}
-
-	return nil
-}
-
-func zfsPoolListSubvolumes(pool string, path string) ([]string, error) {
-	output, err := shared.RunCommand(
-		"zfs",
-		"list",
-		"-t", "filesystem",
-		"-o", "name",
-		"-H",
-		"-r", path)
-	if err != nil {
-		logger.Errorf("zfs list failed: %v", err)
-		return []string{}, errors.Wrap(err, "Failed to list ZFS filesystems")
-	}
-
-	children := []string{}
-	for _, entry := range strings.Split(output, "\n") {
-		if entry == "" {
-			continue
-		}
-
-		if entry == path {
-			continue
-		}
-
-		children = append(children, strings.TrimPrefix(entry, fmt.Sprintf("%s/", pool)))
-	}
-
-	return children, nil
-}
-
-func zfsPoolListSnapshots(pool string, path string) ([]string, error) {
-	path = strings.TrimRight(path, "/")
-	fullPath := pool
-	if path != "" {
-		fullPath = fmt.Sprintf("%s/%s", pool, path)
-	}
-
-	output, err := shared.RunCommand(
-		"zfs",
-		"list",
-		"-t", "snapshot",
-		"-o", "name",
-		"-H",
-		"-d", "1",
-		"-s", "creation",
-		"-r", fullPath)
-	if err != nil {
-		logger.Errorf("zfs list failed: %v", err)
-		return []string{}, errors.Wrap(err, "Failed to list ZFS snapshots")
-	}
-
-	children := []string{}
-	for _, entry := range strings.Split(output, "\n") {
-		if entry == "" {
-			continue
-		}
-
-		if entry == fullPath {
-			continue
-		}
-
-		children = append(children, strings.SplitN(entry, "@", 2)[1])
-	}
-
-	return children, nil
-}
-
-func zfsPoolVolumeSnapshotRemovable(pool string, path string, name string) (bool, error) {
-	var snap string
-	if name == "" {
-		snap = path
-	} else {
-		snap = fmt.Sprintf("%s@%s", path, name)
-	}
-
-	clones, err := zfsFilesystemEntityPropertyGet(pool, snap, "clones")
-	if err != nil {
-		return false, err
-	}
-
-	if clones == "-" || clones == "" {
-		return true, nil
-	}
-
-	return false, nil
-}
-
-func zfsFilesystemEntityExists(pool string, path string) bool {
-	vdev := pool
-	if path != "" {
-		vdev = fmt.Sprintf("%s/%s", pool, path)
-	}
-	output, err := shared.RunCommand(
-		"zfs",
-		"get",
-		"-H",
-		"-o",
-		"name",
-		"type",
-		vdev)
-	if err != nil {
-		return false
-	}
-
-	detectedName := strings.TrimSpace(output)
-	return detectedName == vdev
-}
-
-func (s *storageZfs) doContainerMount(projectName, name string, privileged bool) (bool, error) {
-	logger.Debugf("Mounting ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	volumeName := project.Prefix(projectName, name)
-	fs := fmt.Sprintf("containers/%s", volumeName)
-	containerPoolVolumeMntPoint := driver.GetContainerMountPoint(projectName, s.pool.Name, name)
-
-	containerMountLockID := getContainerMountLockID(s.pool.Name, name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in mounting the storage volume.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[containerMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	removeLockFromMap := func() {
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[containerMountLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, containerMountLockID)
-		}
-		lxdStorageMapLock.Unlock()
-	}
-
-	defer removeLockFromMap()
-
-	// Since we're using mount() directly zfs will not automatically create
-	// the mountpoint for us. So let's check and do it if needed.
-	if !shared.PathExists(containerPoolVolumeMntPoint) {
-		err := driver.CreateContainerMountpoint(containerPoolVolumeMntPoint, shared.VarPath(fs), privileged)
-		if err != nil {
-			return false, err
-		}
-	}
-
-	ourMount := false
-	if !shared.IsMountPoint(containerPoolVolumeMntPoint) {
-		source := fmt.Sprintf("%s/%s", s.getOnDiskPoolName(), fs)
-		zfsMountOptions := fmt.Sprintf("rw,zfsutil,mntpoint=%s", containerPoolVolumeMntPoint)
-		mounterr := storageDrivers.TryMount(source, containerPoolVolumeMntPoint, "zfs", 0, zfsMountOptions)
-		if mounterr != nil {
-			if mounterr != unix.EBUSY {
-				logger.Errorf("Failed to mount ZFS dataset \"%s\" onto \"%s\": %v", source, containerPoolVolumeMntPoint, mounterr)
-				return false, errors.Wrapf(mounterr, "Failed to mount ZFS dataset \"%s\" onto \"%s\"", source, containerPoolVolumeMntPoint)
-			}
-			// EBUSY error in zfs are related to a bug we're
-			// tracking. So ignore them for now, report back that
-			// the mount isn't ours and proceed.
-			logger.Warnf("ZFS returned EBUSY while \"%s\" is actually not a mountpoint", containerPoolVolumeMntPoint)
-			return false, mounterr
-		}
-		ourMount = true
-	}
-
-	logger.Debugf("Mounted ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourMount, nil
-}
-
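
doContainerMount above (and StoragePoolMount in the LVM patch below) shares one concurrency pattern: a mutex-guarded map of channels in which the first caller for a key registers a channel and does the work, while later callers block on that channel and, once it closes, assume the work succeeded. A stripped-down sketch of the lxdStorageOngoingOperationMap pattern, with hypothetical names:

    package main

    import "sync"

    var (
        opLock   sync.Mutex
        inFlight = map[string]chan struct{}{}
    )

    // runOnce lets the first caller for a key do the work while
    // concurrent callers for the same key wait and assume success.
    func runOnce(key string, work func() error) (bool, error) {
        opLock.Lock()
        if waitCh, ok := inFlight[key]; ok {
            opLock.Unlock()
            <-waitCh // Block until the first caller finishes.
            // Give the benefit of the doubt and assume it succeeded.
            return false, nil
        }
        inFlight[key] = make(chan struct{})
        opLock.Unlock()

        defer func() {
            opLock.Lock()
            close(inFlight[key])
            delete(inFlight, key)
            opLock.Unlock()
        }()

        return true, work()
    }
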
-func (s *storageZfs) doContainerDelete(projectName, name string) error {
-	logger.Debugf("Deleting ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	poolName := s.getOnDiskPoolName()
-	containerName := name
-	fs := fmt.Sprintf("containers/%s", project.Prefix(projectName, containerName))
-	containerPoolVolumeMntPoint := driver.GetContainerMountPoint(projectName, s.pool.Name, containerName)
-
-	if zfsFilesystemEntityExists(poolName, fs) {
-		removable := true
-		snaps, err := zfsPoolListSnapshots(poolName, fs)
-		if err != nil {
-			return err
-		}
-
-		for _, snap := range snaps {
-			var err error
-			removable, err = zfsPoolVolumeSnapshotRemovable(poolName, fs, snap)
-			if err != nil {
-				return err
-			}
-
-			if !removable {
-				break
-			}
-		}
-
-		if removable {
-			origin, err := zfsFilesystemEntityPropertyGet(poolName, fs, "origin")
-			if err != nil {
-				return err
-			}
-			poolName := s.getOnDiskPoolName()
-			origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", poolName))
-
-			err = zfsPoolVolumeDestroy(poolName, fs)
-			if err != nil {
-				return err
-			}
-
-			err = zfsPoolVolumeCleanup(poolName, origin)
-			if err != nil {
-				return err
-			}
-		} else {
-			err := zfsPoolVolumeSet(poolName, fs, "mountpoint", "none")
-			if err != nil {
-				return err
-			}
-
-			err = zfsPoolVolumeRename(poolName, fs, fmt.Sprintf("deleted/containers/%s", uuid.NewRandom().String()), true)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	err := deleteContainerMountpoint(containerPoolVolumeMntPoint, shared.VarPath("containers", project.Prefix(projectName, name)), s.GetStorageTypeName())
-	if err != nil {
-		return err
-	}
-
-	snapshotZfsDataset := fmt.Sprintf("snapshots/%s", containerName)
-	zfsPoolVolumeDestroy(poolName, snapshotZfsDataset)
-
-	// Delete potential leftover snapshot mountpoints.
-	snapshotMntPoint := driver.GetSnapshotMountPoint(projectName, s.pool.Name, containerName)
-	if shared.PathExists(snapshotMntPoint) {
-		err := os.RemoveAll(snapshotMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Delete potential leftover snapshot symlinks:
-	// ${LXD_DIR}/snapshots/<container_name> to ${POOL}/snapshots/<container_name>
-	snapshotSymlink := shared.VarPath("snapshots", project.Prefix(projectName, containerName))
-	if shared.PathExists(snapshotSymlink) {
-		err := os.Remove(snapshotSymlink)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Deleted ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageZfs) doContainerCreate(projectName, name string, privileged bool) error {
-	logger.Debugf("Creating empty ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	containerPath := shared.VarPath("containers", project.Prefix(projectName, name))
-	containerName := name
-	fs := fmt.Sprintf("containers/%s", project.Prefix(projectName, containerName))
-	poolName := s.getOnDiskPoolName()
-	dataset := fmt.Sprintf("%s/%s", poolName, fs)
-	containerPoolVolumeMntPoint := driver.GetContainerMountPoint(projectName, s.pool.Name, containerName)
-
-	// Create volume.
-	msg, err := zfsPoolVolumeCreate(dataset, "mountpoint=none", "canmount=noauto")
-	if err != nil {
-		logger.Errorf("Failed to create ZFS storage volume for container \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, msg)
-		return err
-	}
-
-	// Set mountpoint.
-	err = zfsPoolVolumeSet(poolName, fs, "mountpoint", containerPoolVolumeMntPoint)
-	if err != nil {
-		return err
-	}
-
-	err = driver.CreateContainerMountpoint(containerPoolVolumeMntPoint, containerPath, privileged)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Created empty ZFS storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func zfsIdmapSetSkipper(dir string, absPath string, fi os.FileInfo) bool {
-	strippedPath := absPath
-	if dir != "" {
-		strippedPath = absPath[len(dir):]
-	}
-
-	if fi.IsDir() && strippedPath == "/.zfs/snapshot" {
-		return true
-	}
-
-	return false
-}

From 66a8279eaabcc9c6870268abaf253baad8691985 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Wed, 15 Jan 2020 00:13:30 -0500
Subject: [PATCH 04/36] lxd/storage: Remove legacy lvm implementation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/api_internal.go      |    6 +-
 lxd/patches.go           |   27 +-
 lxd/patches_utils.go     |  140 +++
 lxd/storage.go           |   23 -
 lxd/storage_lvm.go       | 2377 --------------------------------------
 lxd/storage_lvm_utils.go | 1090 -----------------
 6 files changed, 154 insertions(+), 3509 deletions(-)
 delete mode 100644 lxd/storage_lvm.go
 delete mode 100644 lxd/storage_lvm_utils.go

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index b41fbf4406..5073c5ec8e 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -727,11 +727,11 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			}
 		case "lvm":
 			ctName, csName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
-			ctLvmName := containerNameToLVName(fmt.Sprintf("%s/%s", project.Prefix(projectName, ctName), csName))
-			ctLvName := getLVName(poolName,
+			ctLvmName := lvmNameToLVName(fmt.Sprintf("%s/%s", project.Prefix(projectName, ctName), csName))
+			ctLvName := lvmLVName(poolName,
 				storagePoolVolumeAPIEndpointContainers,
 				ctLvmName)
-			exists, err := storageLVExists(ctLvName)
+			exists, err := lvmLVExists(ctLvName)
 			if err != nil {
 				return response.InternalError(err)
 			}
diff --git a/lxd/patches.go b/lxd/patches.go
index d2400b1763..05dbf1adf3 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -206,12 +206,7 @@ func patchRenameCustomVolumeLVs(name string, d *Daemon) error {
 			return err
 		}
 
-		sType, err := storageStringToType(pool.Driver)
-		if err != nil {
-			return err
-		}
-
-		if sType != storageTypeLvm {
+		if pool.Driver != "lvm" {
 			continue
 		}
 
@@ -227,9 +222,9 @@ func patchRenameCustomVolumeLVs(name string, d *Daemon) error {
 
 		for _, volume := range volumes {
 			oldName := fmt.Sprintf("%s/custom_%s", vgName, volume)
-			newName := fmt.Sprintf("%s/custom_%s", vgName, containerNameToLVName(volume))
+			newName := fmt.Sprintf("%s/custom_%s", vgName, lvmNameToLVName(volume))
 
-			exists, err := storageLVExists(newName)
+			exists, err := lvmLVExists(newName)
 			if err != nil {
 				return err
 			}
@@ -1052,7 +1047,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 	}
 
 	// Activate volume group
-	err = storageVGActivate(defaultPoolName)
+	err = lvmVGActivate(defaultPoolName)
 	if err != nil {
 		logger.Errorf("Could not activate volume group \"%s\". Manual intervention needed", defaultPoolName)
 		return err
@@ -1168,9 +1163,9 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 		// new storage api. We do os.Rename() here to preserve
 		// permissions and ownership.
 		newContainerMntPoint := driver.GetContainerMountPoint("default", defaultPoolName, ct)
-		ctLvName := containerNameToLVName(ct)
+		ctLvName := lvmNameToLVName(ct)
 		newContainerLvName := fmt.Sprintf("%s_%s", storagePoolVolumeAPIEndpointContainers, ctLvName)
-		containerLvDevPath := getLvmDevPath("default", defaultPoolName, storagePoolVolumeAPIEndpointContainers, ctLvName)
+		containerLvDevPath := lvmDevPath("default", defaultPoolName, storagePoolVolumeAPIEndpointContainers, ctLvName)
 		if !shared.PathExists(containerLvDevPath) {
 			oldLvDevPath := fmt.Sprintf("/dev/%s/%s", defaultPoolName, ctLvName)
 			// If the old LVM device path for the logical volume
@@ -1324,9 +1319,9 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 			os.Remove(oldSnapshotMntPoint + ".lv")
 
 			// Make sure we use a valid lv name.
-			csLvName := containerNameToLVName(cs)
+			csLvName := lvmNameToLVName(cs)
 			newSnapshotLvName := fmt.Sprintf("%s_%s", storagePoolVolumeAPIEndpointContainers, csLvName)
-			snapshotLvDevPath := getLvmDevPath("default", defaultPoolName, storagePoolVolumeAPIEndpointContainers, csLvName)
+			snapshotLvDevPath := lvmDevPath("default", defaultPoolName, storagePoolVolumeAPIEndpointContainers, csLvName)
 			if !shared.PathExists(snapshotLvDevPath) {
 				oldLvDevPath := fmt.Sprintf("/dev/%s/%s", defaultPoolName, csLvName)
 				if shared.PathExists(oldLvDevPath) {
@@ -1506,7 +1501,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 
 		// Rename the logical volume device.
 		newImageLvName := fmt.Sprintf("%s_%s", storagePoolVolumeAPIEndpointImages, img)
-		imageLvDevPath := getLvmDevPath("default", defaultPoolName, storagePoolVolumeAPIEndpointImages, img)
+		imageLvDevPath := lvmDevPath("default", defaultPoolName, storagePoolVolumeAPIEndpointImages, img)
 		oldLvDevPath := fmt.Sprintf("/dev/%s/%s", defaultPoolName, img)
 		// Only create logical volumes for images that have a logical
 		// volume on the pre-storage-api LXD instance. If not, we don't
@@ -2482,8 +2477,8 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 			// It shouldn't be possible that false volume types
 			// exist in the db, so it's safe to ignore the error.
 			volumeTypeApiEndpoint, _ := storagePoolVolumeTypeNameToAPIEndpoint(volume.Type)
-			lvmName := containerNameToLVName(volume.Name)
-			lvmLvDevPath := getLvmDevPath("default", poolName, volumeTypeApiEndpoint, lvmName)
+			lvmName := lvmNameToLVName(volume.Name)
+			lvmLvDevPath := lvmDevPath("default", poolName, volumeTypeApiEndpoint, lvmName)
 			size, err := lvmGetLVSize(lvmLvDevPath)
 			if err != nil {
 				logger.Errorf("Failed to detect size of logical volume: %s", err)
diff --git a/lxd/patches_utils.go b/lxd/patches_utils.go
index 17775e6105..959ebb87f4 100644
--- a/lxd/patches_utils.go
+++ b/lxd/patches_utils.go
@@ -3,10 +3,13 @@ package main
 import (
 	"fmt"
 	"os"
+	"os/exec"
 	"path"
 	"path/filepath"
 	"sort"
+	"strconv"
 	"strings"
+	"syscall"
 
 	"github.com/pborman/uuid"
 	"github.com/pkg/errors"
@@ -15,7 +18,9 @@ import (
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/lxd/state"
 	driver "github.com/lxc/lxd/lxd/storage"
+	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/units"
 )
 
 // For 'dir' storage backend.
@@ -463,3 +468,138 @@ func zfsPoolVolumeSnapshotRename(pool string, path string, oldName string, newNa
 
 	return nil
 }
+
+// For 'lvm' storage backend.
+func lvmLVRename(vgName string, oldName string, newName string) error {
+	_, err := shared.TryRunCommand("lvrename", vgName, oldName, newName)
+	if err != nil {
+		return fmt.Errorf("could not rename logical volume from \"%s\" to \"%s\": %v", oldName, newName, err)
+	}
+
+	return nil
+}
+
+func lvmLVExists(lvName string) (bool, error) {
+	_, err := shared.RunCommand("lvs", "--noheadings", "-o", "lv_attr", lvName)
+	if err != nil {
+		runErr, ok := err.(shared.RunError)
+		if ok {
+			exitError, ok := runErr.Err.(*exec.ExitError)
+			if ok {
+				waitStatus := exitError.Sys().(syscall.WaitStatus)
+				if waitStatus.ExitStatus() == 5 {
+					// logical volume not found
+					return false, nil
+				}
+			}
+		}
+
+		return false, fmt.Errorf("error checking for logical volume \"%s\"", lvName)
+	}
+
+	return true, nil
+}
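+
+The lvmLVExists helper added above reaches the process exit status through shared.RunError and a syscall.WaitStatus cast. Since Go 1.12 the same check can be written against exec.ExitError.ExitCode() directly; a sketch of the equivalent logic under that assumption, not what the patch itself does:
+
+    package main
+
+    import (
+        "errors"
+        "fmt"
+        "os/exec"
+    )
+
+    // lvExists reports whether a logical volume exists by running lvs
+    // and treating exit status 5 ("not found") as a clean negative.
+    func lvExists(lvName string) (bool, error) {
+        err := exec.Command("lvs", "--noheadings", "-o", "lv_attr", lvName).Run()
+        if err != nil {
+            var exitErr *exec.ExitError
+            if errors.As(err, &exitErr) && exitErr.ExitCode() == 5 {
+                return false, nil // Logical volume not found.
+            }
+
+            return false, fmt.Errorf("error checking for logical volume %q: %v", lvName, err)
+        }
+
+        return true, nil
+    }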
+
+func lvmVGActivate(lvmVolumePath string) error {
+	_, err := shared.TryRunCommand("vgchange", "-ay", lvmVolumePath)
+	if err != nil {
+		return fmt.Errorf("could not activate volume group \"%s\": %v", lvmVolumePath, err)
+	}
+
+	return nil
+}
+
+func lvmNameToLVName(containerName string) string {
+	lvName := strings.Replace(containerName, "-", "--", -1)
+	return strings.Replace(lvName, shared.SnapshotDelimiter, "-", -1)
+}
+
+func lvmDevPath(projectName, lvmPool string, volumeType string, lvmVolume string) string {
+	lvmVolume = project.Prefix(projectName, lvmVolume)
+	if volumeType == "" {
+		return fmt.Sprintf("/dev/%s/%s", lvmPool, lvmVolume)
+	}
+
+	return fmt.Sprintf("/dev/%s/%s_%s", lvmPool, volumeType, lvmVolume)
+}
+
+func lvmGetLVSize(lvPath string) (string, error) {
+	msg, err := shared.TryRunCommand("lvs", "--noheadings", "-o", "size", "--nosuffix", "--units", "b", lvPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to retrieve size of logical volume: %s: %s", string(msg), err)
+	}
+
+	sizeString := string(msg)
+	sizeString = strings.TrimSpace(sizeString)
+	size, err := strconv.ParseInt(sizeString, 10, 64)
+	if err != nil {
+		return "", err
+	}
+
+	detectedSize := units.GetByteSizeString(size, 0)
+
+	return detectedSize, nil
+}
+
+func lvmLVName(lvmPool string, volumeType string, lvmVolume string) string {
+	if volumeType == "" {
+		return fmt.Sprintf("%s/%s", lvmPool, lvmVolume)
+	}
+
+	return fmt.Sprintf("%s/%s_%s", lvmPool, volumeType, lvmVolume)
+}
+
+func lvmContainerDeleteInternal(projectName, poolName string, ctName string, isSnapshot bool, vgName string, ctPath string) error {
+	containerMntPoint := ""
+	containerLvmName := lvmNameToLVName(ctName)
+	if isSnapshot {
+		containerMntPoint = driver.GetSnapshotMountPoint(projectName, poolName, ctName)
+	} else {
+		containerMntPoint = driver.GetContainerMountPoint(projectName, poolName, ctName)
+	}
+
+	if shared.IsMountPoint(containerMntPoint) {
+		err := storageDrivers.TryUnmount(containerMntPoint, 0)
+		if err != nil {
+			return fmt.Errorf(`Failed to unmount container path `+
+				`"%s": %s`, containerMntPoint, err)
+		}
+	}
+
+	containerLvmDevPath := lvmDevPath(projectName, vgName,
+		storagePoolVolumeAPIEndpointContainers, containerLvmName)
+
+	lvExists, _ := lvmLVExists(containerLvmDevPath)
+	if lvExists {
+		err := lvmRemoveLV(projectName, vgName, storagePoolVolumeAPIEndpointContainers, containerLvmName)
+		if err != nil {
+			return err
+		}
+	}
+
+	var err error
+	if isSnapshot {
+		sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(ctName)
+		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", poolName, "containers-snapshots", project.Prefix(projectName, sourceName))
+		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceName))
+		err = deleteSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
+	} else {
+		err = deleteContainerMountpoint(containerMntPoint, ctPath, "lvm")
+	}
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func lvmRemoveLV(project, vgName string, volumeType string, lvName string) error {
+	lvmVolumePath := lvmDevPath(project, vgName, volumeType, lvName)
+
+	_, err := shared.TryRunCommand("lvremove", "-f", lvmVolumePath)
+	if err != nil {
+		return fmt.Errorf("Could not remove LV named %s: %v", lvName, err)
+	}
+
+	return nil
+}
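
The lvmNameToLVName helper added above escapes LXD names into LV-safe ones: literal hyphens are doubled first, then the snapshot delimiter becomes a single hyphen, which keeps the two cases distinguishable when mapping back. A usage sketch, assuming shared.SnapshotDelimiter is "/" as defined elsewhere in the tree:

    fmt.Println(lvmNameToLVName("my-ct"))       // my--ct
    fmt.Println(lvmNameToLVName("my-ct/snap0")) // my--ct-snap0
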
diff --git a/lxd/storage.go b/lxd/storage.go
index 61d2d6ed2f..8a87be9f59 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -97,7 +97,6 @@ type storageType int
 
 const (
 	storageTypeCeph storageType = iota
-	storageTypeLvm
 	storageTypeMock
 )
 
@@ -107,8 +106,6 @@ func storageTypeToString(sType storageType) (string, error) {
 	switch sType {
 	case storageTypeCeph:
 		return "ceph", nil
-	case storageTypeLvm:
-		return "lvm", nil
 	case storageTypeMock:
 		return "mock", nil
 	}
@@ -120,8 +117,6 @@ func storageStringToType(sName string) (storageType, error) {
 	switch sName {
 	case "ceph":
 		return storageTypeCeph, nil
-	case "lvm":
-		return storageTypeLvm, nil
 	case "mock":
 		return storageTypeMock, nil
 	}
@@ -249,13 +244,6 @@ func storageCoreInit(driver string) (storage, error) {
 			return nil, err
 		}
 		return &ceph, nil
-	case storageTypeLvm:
-		lvm := storageLvm{}
-		err = lvm.StorageCoreInit()
-		if err != nil {
-			return nil, err
-		}
-		return &lvm, nil
 	case storageTypeMock:
 		mock := storageMock{}
 		err = mock.StorageCoreInit()
@@ -308,17 +296,6 @@ func storageInit(s *state.State, project, poolName, volumeName string, volumeTyp
 			return nil, err
 		}
 		return &ceph, nil
-	case storageTypeLvm:
-		lvm := storageLvm{}
-		lvm.poolID = poolID
-		lvm.pool = pool
-		lvm.volume = volume
-		lvm.s = s
-		err = lvm.StoragePoolInit()
-		if err != nil {
-			return nil, err
-		}
-		return &lvm, nil
 	case storageTypeMock:
 		mock := storageMock{}
 		mock.poolID = poolID
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
deleted file mode 100644
index e82675685e..0000000000
--- a/lxd/storage_lvm.go
+++ /dev/null
@@ -1,2377 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"github.com/gorilla/websocket"
-	"github.com/pborman/uuid"
-	"github.com/pkg/errors"
-
-	"github.com/lxc/lxd/lxd/backup"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/instance/instancetype"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/lxd/rsync"
-	driver "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/ioprogress"
-	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/units"
-)
-
-type storageLvm struct {
-	vgName       string
-	thinPoolName string
-	useThinpool  bool
-	loopInfo     *os.File
-	storageShared
-}
-
-var lvmVersion = ""
-
-// Only initialize the minimal information we need about a given storage type.
-func (s *storageLvm) StorageCoreInit() error {
-	s.sType = storageTypeLvm
-	typeName, err := storageTypeToString(s.sType)
-	if err != nil {
-		return err
-	}
-	s.sTypeName = typeName
-
-	if lvmVersion != "" {
-		s.sTypeVersion = lvmVersion
-		return nil
-	}
-
-	output, err := shared.RunCommand("lvm", "version")
-	if err != nil {
-		return fmt.Errorf("Error getting LVM version: %v", err)
-	}
-	lines := strings.Split(output, "\n")
-
-	s.sTypeVersion = ""
-	for idx, line := range lines {
-		fields := strings.SplitAfterN(line, ":", 2)
-		if len(fields) < 2 {
-			continue
-		}
-
-		if !strings.Contains(line, "version:") {
-			continue
-		}
-
-		if idx > 0 {
-			s.sTypeVersion += " / "
-		}
-		s.sTypeVersion += strings.TrimSpace(fields[1])
-	}
-
-	lvmVersion = s.sTypeVersion
-
-	return nil
-}
-
-func (s *storageLvm) StoragePoolInit() error {
-	err := s.StorageCoreInit()
-	if err != nil {
-		return err
-	}
-
-	source := s.pool.Config["source"]
-	s.thinPoolName = s.getLvmThinpoolName()
-	s.useThinpool = s.usesThinpool()
-
-	if s.pool.Config["lvm.vg_name"] != "" {
-		s.vgName = s.pool.Config["lvm.vg_name"]
-	}
-
-	if source != "" && !filepath.IsAbs(source) {
-		ok, err := storageVGExists(source)
-		if err != nil {
-			// Internal error.
-			return err
-		} else if !ok {
-			// Volume group does not exist.
-			return fmt.Errorf("the requested volume group \"%s\" does not exist", source)
-		}
-		s.vgName = source
-	}
-
-	return nil
-}
-
-func (s *storageLvm) StoragePoolCheck() error {
-	logger.Debugf("Checking LVM storage pool \"%s\"", s.pool.Name)
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-	if s.loopInfo != nil {
-		defer s.loopInfo.Close()
-		defer func() { s.loopInfo = nil }()
-	}
-
-	poolName := s.getOnDiskPoolName()
-	err = storageVGActivate(poolName)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Checked LVM storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageLvm) StoragePoolCreate() error {
-	logger.Infof("Creating LVM storage pool \"%s\"", s.pool.Name)
-
-	s.pool.Config["volatile.initial_source"] = s.pool.Config["source"]
-
-	var globalErr error
-	var pvExisted bool
-	var vgExisted bool
-	tryUndo := true
-	poolName := s.getOnDiskPoolName()
-	source := s.pool.Config["source"]
-	// must be initialized
-	vgName := ""
-	// not initialized in all cases
-	pvName := ""
-
-	// Create the mountpoint for the storage pool.
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-	err := os.MkdirAll(poolMntPoint, 0711)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if tryUndo {
-			os.Remove(poolMntPoint)
-		}
-	}()
-
-	defaultSource := filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", s.pool.Name))
-	if source == "" || source == defaultSource {
-		source = defaultSource
-		s.pool.Config["source"] = source
-
-		if s.pool.Config["lvm.vg_name"] == "" {
-			s.pool.Config["lvm.vg_name"] = poolName
-		}
-
-		f, err := os.Create(source)
-		if err != nil {
-			return fmt.Errorf("Failed to open %s: %s", source, err)
-		}
-		defer f.Close()
-
-		err = f.Chmod(0600)
-		if err != nil {
-			return fmt.Errorf("Failed to chmod %s: %s", source, err)
-		}
-
-		size, err := units.ParseByteSizeString(s.pool.Config["size"])
-		if err != nil {
-			return err
-		}
-		err = f.Truncate(size)
-		if err != nil {
-			return fmt.Errorf("Failed to create sparse file %s: %s", source, err)
-		}
-
-		_, err = s.StoragePoolMount()
-		if err != nil {
-			return err
-		}
-		defer func() {
-			if tryUndo {
-				os.Remove(source)
-			}
-		}()
-		if s.loopInfo != nil {
-			defer s.loopInfo.Close()
-			defer func() { s.loopInfo = nil }()
-		}
-
-		// Check if the physical volume already exists.
-		pvName = s.loopInfo.Name()
-		pvExisted, globalErr = storagePVExists(pvName)
-		if globalErr != nil {
-			return globalErr
-		}
-
-		// Check if the volume group already exists.
-		vgExisted, globalErr = storageVGExists(poolName)
-		if globalErr != nil {
-			return globalErr
-		}
-	} else {
-		s.pool.Config["size"] = ""
-		if filepath.IsAbs(source) {
-			pvName = source
-			if !shared.IsBlockdevPath(pvName) {
-				return fmt.Errorf("Custom loop file locations are not supported")
-			}
-
-			if s.pool.Config["lvm.vg_name"] == "" {
-				s.pool.Config["lvm.vg_name"] = poolName
-			}
-
-			// Set source to volume group name.
-			s.pool.Config["source"] = poolName
-
-			// Check if the physical volume already exists.
-			pvExisted, globalErr = storagePVExists(pvName)
-			if globalErr != nil {
-				return globalErr
-			}
-
-			// Check if the volume group already exists.
-			vgExisted, globalErr = storageVGExists(poolName)
-			if globalErr != nil {
-				return globalErr
-			}
-		} else {
-			// The physical volume must already exist
-			pvExisted = true
-			vgName = source
-			if s.pool.Config["lvm.vg_name"] != "" && s.pool.Config["lvm.vg_name"] != vgName {
-				// User gave us something weird.
-				return fmt.Errorf("Invalid combination of \"source\" and \"lvm.vg_name\" property")
-			}
-
-			s.pool.Config["lvm.vg_name"] = vgName
-			s.vgName = vgName
-
-			vgExisted, globalErr = storageVGExists(vgName)
-			if globalErr != nil {
-				return globalErr
-			}
-
-			// Volume group must exist but doesn't.
-			if !vgExisted {
-				return fmt.Errorf("The requested volume group \"%s\" does not exist", vgName)
-			}
-		}
-	}
-
-	if !pvExisted {
-		// This is an internal error condition which should never be
-		// hit.
-		if pvName == "" {
-			logger.Errorf("No name for physical volume detected")
-		}
-
-		_, err := shared.TryRunCommand("pvcreate", pvName)
-		if err != nil {
-			return fmt.Errorf("Failed to create the physical volume for the lvm storage pool: %v", err)
-		}
-		defer func() {
-			if tryUndo {
-				shared.TryRunCommand("pvremove", pvName)
-			}
-		}()
-	}
-
-	if vgExisted {
-		// Check that the volume group is empty.
-		// Otherwise we will refuse to use it.
-		count, err := lvmGetLVCount(poolName)
-		if err != nil {
-			logger.Errorf("Failed to determine whether the volume group \"%s\" is empty", poolName)
-			return err
-		}
-
-		empty := true
-		if count > 0 && !s.useThinpool {
-			empty = false
-		}
-
-		if count > 0 && s.useThinpool {
-			ok, err := storageLVMThinpoolExists(poolName, s.thinPoolName)
-			if err != nil {
-				logger.Errorf("Failed to determine whether thinpool \"%s\" exists in volume group \"%s\": %s", poolName, s.thinPoolName, err)
-				return err
-			}
-			empty = ok
-		}
-
-		if !empty {
-			msg := fmt.Sprintf("volume group \"%s\" is not empty", poolName)
-			logger.Errorf(msg)
-			return fmt.Errorf(msg)
-		}
-
-		// Check that we don't already use this volume group.
-		inUse, user, err := driver.LXDUsesPool(s.s.Cluster, poolName, s.pool.Driver, "lvm.vg_name")
-		if err != nil {
-			return err
-		}
-
-		if inUse {
-			msg := fmt.Sprintf("LXD already uses volume group \"%s\" for pool \"%s\"", poolName, user)
-			logger.Errorf(msg)
-			return fmt.Errorf(msg)
-		}
-	} else {
-		_, err := shared.TryRunCommand("vgcreate", poolName, pvName)
-		if err != nil {
-			return fmt.Errorf("failed to create the volume group for the lvm storage pool: %v", err)
-		}
-	}
-
-	err = s.StoragePoolCheck()
-	if err != nil {
-		return err
-	}
-
-	// Deregister cleanup.
-	tryUndo = false
-
-	logger.Infof("Created LVM storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
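-// StoragePoolDelete destroys the LVM storage pool: it removes the thinpool
-// (when one is used), removes the volume group once no logical volumes
-// remain, cleans up any loop device and loop file backing the pool, and
-// deletes the pool mountpoint.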
-func (s *storageLvm) StoragePoolDelete() error {
-	logger.Infof("Deleting LVM storage pool \"%s\"", s.pool.Name)
-
-	source := s.pool.Config["source"]
-	if source == "" {
-		return fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	_, err := s.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-	if s.loopInfo != nil {
-		defer s.loopInfo.Close()
-		defer func() { s.loopInfo = nil }()
-	}
-
-	poolName := s.getOnDiskPoolName()
-	poolExists, _ := storageVGExists(poolName)
-
-	// Delete the thinpool.
-	if s.useThinpool && poolExists {
-		// Check that the thinpool actually exists. For example, it
-		// won't when the user has never created a storage volume in the
-		// storage pool.
-		devPath := getLvmDevPath("default", poolName, "", s.thinPoolName)
-		ok, _ := storageLVExists(devPath)
-		if ok {
-			msg, err := shared.TryRunCommand("lvremove", "-f", devPath)
-			if err != nil {
-				logger.Errorf("Failed to delete thinpool \"%s\" from volume group \"%s\": %s", s.thinPoolName, poolName, msg)
-				return err
-			}
-		}
-	}
-
-	// Check that the count in the volume group is zero. If not, we need to
-	// assume that other users are using the volume group, so don't remove
-	// it. This actually goes against our stated policy (our pool and
-	// nothing but our pool), but let's not hurt users.
-	count, err := lvmGetLVCount(poolName)
-	if err != nil {
-		return err
-	}
-
-	// Remove the volume group.
-	if count == 0 && poolExists {
-		_, err := shared.TryRunCommand("vgremove", "-f", poolName)
-		if err != nil {
-			logger.Errorf("Failed to destroy the volume group for the lvm storage pool: %v", err)
-			return err
-		}
-	}
-
-	if s.loopInfo != nil {
-		// Set LO_FLAGS_AUTOCLEAR before we remove the loop file
-		// otherwise we will get EBADF.
-		err = storageDrivers.SetAutoclearOnLoopDev(int(s.loopInfo.Fd()))
-		if err != nil {
-			logger.Warnf("Failed to set LO_FLAGS_AUTOCLEAR on loop device: %s, manual cleanup needed", err)
-		}
-
-		output, err := shared.TryRunCommand("pvremove", "-f", s.loopInfo.Name())
-		if err != nil {
-			logger.Warnf("Failed to destroy the physical volume for the lvm storage pool: %s", output)
-		}
-	}
-
-	if filepath.IsAbs(source) {
-		// This is a loop file so remove the file itself; the loop
-		// device is cleaned up via LO_FLAGS_AUTOCLEAR above.
-		err = os.Remove(source)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Delete the mountpoint for the storage pool.
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-	err = os.RemoveAll(poolMntPoint)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Deleted LVM storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-// Currently only used for loop-backed LVM storage pools. Can be called without
-// overhead since it is essentially a noop for non-loop-backed LVM storage
-// pools.
-func (s *storageLvm) StoragePoolMount() (bool, error) {
-	source := s.pool.Config["source"]
-	if source == "" {
-		return false, fmt.Errorf("no \"source\" property found for the storage pool")
-	}
-
-	if !filepath.IsAbs(source) {
-		return true, nil
-	}
-
-	poolMountLockID := getPoolMountLockID(s.pool.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in mounting the storage pool.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[poolMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	removeLockFromMap := func() {
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, poolMountLockID)
-		}
-		lxdStorageMapLock.Unlock()
-	}
-
-	defer removeLockFromMap()
-
-	if filepath.IsAbs(source) && !shared.IsBlockdevPath(source) {
-		// Try to prepare new loop device.
-		loopF, loopErr := storageDrivers.PrepareLoopDev(source, 0)
-		if loopErr != nil {
-			return false, loopErr
-		}
-		// Make sure that LO_FLAGS_AUTOCLEAR is unset.
-		loopErr = storageDrivers.UnsetAutoclearOnLoopDev(int(loopF.Fd()))
-		if loopErr != nil {
-			return false, loopErr
-		}
-		s.loopInfo = loopF
-	}
-
-	return true, nil
-}
-
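-// StoragePoolUmount is a noop for LVM storage pools; nothing is mounted at
-// the pool level.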
-func (s *storageLvm) StoragePoolUmount() (bool, error) {
-	return true, nil
-}
-
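-// StoragePoolVolumeCreate creates a logical volume (thin or regular) for a
-// new custom storage volume, creates its mountpoint and applies any
-// configured size quota.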
-func (s *storageLvm) StoragePoolVolumeCreate() error {
-	logger.Infof("Creating LVM storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	tryUndo := true
-
-	volumeLvmName := containerNameToLVName(s.volume.Name)
-	poolName := s.getOnDiskPoolName()
-	thinPoolName := s.getLvmThinpoolName()
-	lvFsType := s.getLvmFilesystem()
-	lvSize, err := s.getLvmVolumeSize()
-	if lvSize == "" {
-		return err
-	}
-
-	volumeType, err := storagePoolVolumeTypeNameToAPIEndpoint(s.volume.Type)
-	if err != nil {
-		return err
-	}
-
-	if s.useThinpool {
-		err = lvmCreateThinpool(s.s, s.sTypeVersion, poolName, thinPoolName, lvFsType)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = lvmCreateLv("default", poolName, thinPoolName, volumeLvmName, lvFsType, lvSize, volumeType, s.useThinpool)
-	if err != nil {
-		return fmt.Errorf("Error creating LVM LV for new storage volume: %v", err)
-	}
-	defer func() {
-		if tryUndo {
-			s.StoragePoolVolumeDelete()
-		}
-	}()
-
-	customPoolVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	err = os.MkdirAll(customPoolVolumeMntPoint, 0711)
-	if err != nil {
-		return err
-	}
-
-	// Apply quota if a size was configured.
-	if s.volume.Config["size"] != "" {
-		size, err := units.ParseByteSizeString(s.volume.Config["size"])
-		if err != nil {
-			return err
-		}
-
-		err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-		if err != nil {
-			return err
-		}
-	}
-
-	tryUndo = false
-
-	logger.Infof("Created LVM storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
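-// StoragePoolVolumeDelete unmounts and removes the logical volume backing a
-// custom storage volume, deletes its mountpoint and removes its database
-// entry.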
-func (s *storageLvm) StoragePoolVolumeDelete() error {
-	logger.Infof("Deleting LVM storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	volumeLvmName := containerNameToLVName(s.volume.Name)
-	poolName := s.getOnDiskPoolName()
-	customLvmDevPath := getLvmDevPath("default", poolName,
-		storagePoolVolumeAPIEndpointCustom, volumeLvmName)
-	lvExists, _ := storageLVExists(customLvmDevPath)
-
-	if lvExists {
-		_, err := s.StoragePoolVolumeUmount()
-		if err != nil {
-			return err
-		}
-	}
-
-	volumeType, err := storagePoolVolumeTypeNameToAPIEndpoint(s.volume.Type)
-	if err != nil {
-		return err
-	}
-
-	if lvExists {
-		err = removeLV("default", poolName, volumeType, volumeLvmName)
-		if err != nil {
-			return err
-		}
-	}
-
-	customPoolVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	if shared.PathExists(customPoolVolumeMntPoint) {
-		err := os.RemoveAll(customPoolVolumeMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		s.volume.Name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for LVM storage volume "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-	}
-
-	logger.Infof("Deleted LVM storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
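-// StoragePoolVolumeMount mounts the logical volume backing a custom storage
-// volume onto its mountpoint, using the ongoing-operation map to ensure
-// that concurrent mount requests for the same volume only run once.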
-func (s *storageLvm) StoragePoolVolumeMount() (bool, error) {
-	logger.Debugf("Mounting LVM storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	volumeLvmName := containerNameToLVName(s.volume.Name)
-	customPoolVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	poolName := s.getOnDiskPoolName()
-	lvFsType := s.getLvmFilesystem()
-	volumeType, err := storagePoolVolumeTypeNameToAPIEndpoint(s.volume.Type)
-	if err != nil {
-		return false, err
-	}
-	lvmVolumePath := getLvmDevPath("default", poolName, volumeType, volumeLvmName)
-
-	customMountLockID := getCustomMountLockID(s.pool.Name, s.volume.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in mounting the storage volume.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[customMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var customerr error
-	ourMount := false
-	if !shared.IsMountPoint(customPoolVolumeMntPoint) {
-		mountFlags, mountOptions := resolveMountOptions(s.getLvmMountOptions())
-		customerr = storageDrivers.TryMount(lvmVolumePath, customPoolVolumeMntPoint, lvFsType, mountFlags, mountOptions)
-		ourMount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, customMountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if customerr != nil {
-		return false, customerr
-	}
-
-	logger.Debugf("Mounted LVM storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourMount, nil
-}
-
-func (s *storageLvm) StoragePoolVolumeUmount() (bool, error) {
-	logger.Debugf("Unmounting LVM storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	customPoolVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-	customUmountLockID := getCustomUmountLockID(s.pool.Name, s.volume.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customUmountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in unmounting the storage volume.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[customUmountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var customerr error
-	ourUmount := false
-	if shared.IsMountPoint(customPoolVolumeMntPoint) {
-		customerr = storageDrivers.TryUnmount(customPoolVolumeMntPoint, 0)
-		ourUmount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customUmountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, customUmountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if customerr != nil {
-		return false, customerr
-	}
-
-	logger.Debugf("Unmounted LVM storage volume \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourUmount, nil
-}
-
-func (s *storageLvm) GetContainerPoolInfo() (int64, string, string) {
-	return s.poolID, s.pool.Name, s.getOnDiskPoolName()
-}
-
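-// StoragePoolUpdate applies configuration changes to the pool. It rejects
-// unchangeable properties and performs the on-disk renames needed for
-// "lvm.thinpool_name" and "lvm.vg_name", with best-effort rollback if a
-// later step fails.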
-func (s *storageLvm) StoragePoolUpdate(writable *api.StoragePoolPut, changedConfig []string) error {
-	logger.Infof(`Updating LVM storage pool "%s"`, s.pool.Name)
-
-	changeable := changeableStoragePoolProperties["lvm"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolError(unchangeable, "lvm")
-	}
-
-	// "volume.block.mount_options" requires no on-disk modifications.
-	// "volume.block.filesystem" requires no on-disk modifications.
-	// "volume.size" requires no on-disk modifications.
-	// "rsync.bwlimit" requires no on-disk modifications.
-
-	revert := true
-
-	if shared.StringInSlice("lvm.thinpool_name", changedConfig) {
-		if !s.useThinpool {
-			return fmt.Errorf(`The LVM storage pool "%s" does `+
-				`not use thin pools. The "lvm.thinpool_name" `+
-				`property cannot be set`, s.pool.Name)
-		}
-
-		newThinpoolName := writable.Config["lvm.thinpool_name"]
-		// Paranoia check
-		if newThinpoolName == "" {
-			return fmt.Errorf(`Could not rename thin pool: No ` +
-				`new name provided`)
-		}
-
-		poolName := s.getOnDiskPoolName()
-		oldThinpoolName := s.getLvmThinpoolName()
-		err := lvmLVRename(poolName, oldThinpoolName, newThinpoolName)
-		if err != nil {
-			return err
-		}
-
-		// Already set the new thinpool name so that any potentially
-		// following operations use the correct on-disk name of the
-		// volume group.
-		s.setLvmThinpoolName(newThinpoolName)
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err = lvmLVRename(poolName, newThinpoolName, oldThinpoolName)
-			if err != nil {
-				logger.Warnf(`Failed to rename LVM thinpool from "%s" to "%s": %s. Manual intervention needed`, newThinpoolName, oldThinpoolName, err)
-			}
-			s.setLvmThinpoolName(oldThinpoolName)
-		}()
-	}
-
-	if shared.StringInSlice("lvm.vg_name", changedConfig) {
-		newName := writable.Config["lvm.vg_name"]
-		// Paranoia check
-		if newName == "" {
-			return fmt.Errorf(`Could not rename volume group: No ` +
-				`new name provided`)
-		}
-		writable.Config["source"] = newName
-
-		oldPoolName := s.getOnDiskPoolName()
-		err := lvmVGRename(oldPoolName, newName)
-		if err != nil {
-			return err
-		}
-
-		// Already set the new volume group name so that any potentially
-		// following operations use the correct on-disk name of the
-		// volume group.
-		s.setOnDiskPoolName(newName)
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err := lvmVGRename(newName, oldPoolName)
-			if err != nil {
-				logger.Warnf(`Failed to rename LVM volume group from "%s" to "%s": %s. Manual intervention needed`, newName, oldPoolName, err)
-			}
-			s.setOnDiskPoolName(oldPoolName)
-		}()
-	}
-
-	// Update succeeded.
-	revert = false
-
-	logger.Infof(`Updated LVM storage pool "%s"`, s.pool.Name)
-	return nil
-}
-
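-// StoragePoolVolumeUpdate either restores a custom volume from one of its
-// snapshots (via a new snapshot LV on thinpools, rsync otherwise) or
-// applies configuration changes such as a new size quota.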
-func (s *storageLvm) StoragePoolVolumeUpdate(writable *api.StorageVolumePut,
-	changedConfig []string) error {
-
-	if writable.Restore != "" {
-		logger.Infof(`Restoring LVM storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-
-		_, err := s.StoragePoolVolumeUmount()
-		if err != nil {
-			return err
-		}
-
-		sourceLvmName := containerNameToLVName(fmt.Sprintf("%s/%s", s.volume.Name, writable.Restore))
-		targetLvmName := containerNameToLVName(s.volume.Name)
-
-		if s.useThinpool {
-			poolName := s.getOnDiskPoolName()
-
-			err := removeLV("default", poolName,
-				storagePoolVolumeAPIEndpointCustom, targetLvmName)
-			if err != nil {
-				logger.Errorf("Failed to remove \"%s\": %s",
-					targetLvmName, err)
-			}
-
-			_, err = s.createSnapshotLV("default", poolName, sourceLvmName,
-				storagePoolVolumeAPIEndpointCustom, targetLvmName,
-				storagePoolVolumeAPIEndpointCustom, false, true)
-			if err != nil {
-				return fmt.Errorf("Error creating snapshot LV: %v", err)
-			}
-		} else {
-			poolName := s.getOnDiskPoolName()
-			sourceName := fmt.Sprintf("%s/%s", s.volume.Name, writable.Restore)
-			sourceVolumeMntPoint := driver.GetStoragePoolVolumeSnapshotMountPoint(poolName, sourceName)
-			targetVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(poolName, s.volume.Name)
-
-			bwlimit := s.pool.Config["rsync.bwlimit"]
-			output, err := rsync.LocalCopy(sourceVolumeMntPoint, targetVolumeMntPoint, bwlimit, true)
-			if err != nil {
-				return fmt.Errorf("Failed to rsync container: %s: %s", string(output), err)
-			}
-		}
-
-		logger.Infof(`Restored LVM storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-		return nil
-	}
-
-	logger.Infof(`Updating LVM storage volume "%s"`, s.volume.Name)
-
-	changeable := changeableStoragePoolVolumeProperties["lvm"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolVolumeError(unchangeable, "lvm")
-	}
-
-	if shared.StringInSlice("size", changedConfig) {
-		if s.volume.Type != storagePoolVolumeTypeNameCustom {
-			return updateStoragePoolVolumeError([]string{"size"}, "lvm")
-		}
-
-		if s.volume.Config["size"] != writable.Config["size"] {
-			size, err := units.ParseByteSizeString(writable.Config["size"])
-			if err != nil {
-				return err
-			}
-
-			err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	logger.Infof(`Updated LVM storage volume "%s"`, s.volume.Name)
-	return nil
-}
-
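-// StoragePoolVolumeRename renames the logical volume and mountpoint of a
-// custom storage volume, refusing to proceed while the volume is attached
-// to any container.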
-func (s *storageLvm) StoragePoolVolumeRename(newName string) error {
-	logger.Infof(`Renaming LVM storage volume on storage pool "%s" from "%s" to "%s"`,
-		s.pool.Name, s.volume.Name, newName)
-
-	_, err := s.StoragePoolVolumeUmount()
-	if err != nil {
-		return err
-	}
-
-	usedBy, err := storagePoolVolumeUsedByInstancesGet(s.s, "default", s.pool.Name, s.volume.Name)
-	if err != nil {
-		return err
-	}
-	if len(usedBy) > 0 {
-		return fmt.Errorf(`LVM storage volume "%s" on storage pool "%s" is attached to containers`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	sourceLVName := containerNameToLVName(s.volume.Name)
-	targetLVName := containerNameToLVName(newName)
-
-	err = s.renameLVByPath("default", sourceLVName, targetLVName,
-		storagePoolVolumeAPIEndpointCustom)
-	if err != nil {
-		return fmt.Errorf(`Failed to rename logical volume from "%s" to "%s": %s`,
-			s.volume.Name, newName, err)
-	}
-
-	sourceName, _, ok := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	if !ok {
-		return fmt.Errorf("Not a snapshot name")
-	}
-	fullSnapshotName := fmt.Sprintf("%s%s%s", sourceName, shared.SnapshotDelimiter, newName)
-	oldPath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	newPath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, fullSnapshotName)
-	err = os.Rename(oldPath, newPath)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof(`Renamed LVM storage volume on storage pool "%s" from "%s" to "%s"`,
-		s.pool.Name, s.volume.Name, newName)
-
-	return s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, newName,
-		storagePoolVolumeTypeCustom, s.poolID)
-}
-
-func (s *storageLvm) ContainerStorageReady(container instance.Instance) bool {
-	containerLvmName := containerNameToLVName(container.Name())
-	poolName := s.getOnDiskPoolName()
-	containerLvmPath := getLvmDevPath(container.Project(), poolName, storagePoolVolumeAPIEndpointContainers, containerLvmName)
-	ok, _ := storageLVExists(containerLvmPath)
-	return ok
-}
-
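-// ContainerCreate creates an empty logical volume for a new container or
-// snapshot and sets up the matching mountpoint and symlinks.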
-func (s *storageLvm) ContainerCreate(container instance.Instance) error {
-	logger.Debugf("Creating empty LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	tryUndo := true
-
-	containerName := container.Name()
-	containerLvmName := containerNameToLVName(containerName)
-	thinPoolName := s.getLvmThinpoolName()
-	lvFsType := s.getLvmFilesystem()
-	lvSize, err := s.getLvmVolumeSize()
-	if lvSize == "" {
-		return err
-	}
-
-	poolName := s.getOnDiskPoolName()
-	if s.useThinpool {
-		err = lvmCreateThinpool(s.s, s.sTypeVersion, poolName, thinPoolName, lvFsType)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = lvmCreateLv(container.Project(), poolName, thinPoolName, containerLvmName, lvFsType, lvSize, storagePoolVolumeAPIEndpointContainers, s.useThinpool)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if tryUndo {
-			s.ContainerDelete(container)
-		}
-	}()
-
-	if container.IsSnapshot() {
-		containerMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, containerName)
-		sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(containerName)
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(container.Project(), sourceName))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(container.Project(), sourceName))
-		err := os.MkdirAll(containerMntPoint, 0711)
-		if err != nil {
-			return err
-		}
-		err = driver.CreateSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-		if err != nil {
-			return err
-		}
-	} else {
-		containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, containerName)
-		containerPath := container.Path()
-		err := os.MkdirAll(containerMntPoint, 0711)
-		if err != nil {
-			return err
-		}
-		err = driver.CreateContainerMountpoint(containerMntPoint, containerPath, container.IsPrivileged())
-		if err != nil {
-			return err
-		}
-	}
-
-	tryUndo = false
-
-	logger.Debugf("Created empty LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
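-// ContainerCreateFromImage creates a container volume from an image, either
-// as a thin snapshot of the image LV (thinpool) or as a fresh LV populated
-// from the image, then regenerates the filesystem UUID and applies
-// templates.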
-func (s *storageLvm) ContainerCreateFromImage(container instance.Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	logger.Debugf("Creating LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	tryUndo := true
-
-	containerName := container.Name()
-	containerLvmName := containerNameToLVName(containerName)
-
-	var err error
-	if s.useThinpool {
-		err = s.containerCreateFromImageThinLv(container, fingerprint)
-	} else {
-		err = s.containerCreateFromImageLv(container, fingerprint)
-	}
-	if err != nil {
-		logger.Errorf(`Failed to create LVM storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Created LVM storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-	defer func() {
-		if tryUndo {
-			s.ContainerDelete(container)
-		}
-	}()
-
-	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, containerName)
-	containerPath := container.Path()
-	err = os.MkdirAll(containerMntPoint, 0711)
-	if err != nil {
-		return errors.Wrapf(err, "Create container mount point directory at %s", containerMntPoint)
-	}
-	err = driver.CreateContainerMountpoint(containerMntPoint, containerPath, container.IsPrivileged())
-	if err != nil {
-		return errors.Wrap(err, "Create container mount point")
-	}
-
-	poolName := s.getOnDiskPoolName()
-	containerLvDevPath := getLvmDevPath(container.Project(), poolName, storagePoolVolumeAPIEndpointContainers, containerLvmName)
-	// Generate a new UUID for the filesystem.
-	lvFsType := s.getLvmFilesystem()
-	msg, err := driver.FSGenerateNewUUID(lvFsType, containerLvDevPath)
-	if err != nil {
-		logger.Errorf("Failed to create new \"%s\" UUID for container \"%s\" on storage pool \"%s\": %s", lvFsType, containerName, s.pool.Name, msg)
-		return err
-	}
-
-	ourMount, err := s.ContainerMount(container)
-	if err != nil {
-		return errors.Wrap(err, "Container mount")
-	}
-	if ourMount {
-		defer s.ContainerUmount(container, containerPath)
-	}
-
-	err = os.Chmod(containerMntPoint, 0100)
-	if err != nil {
-		return errors.Wrap(err, "Set mount point permissions")
-	}
-
-	err = container.DeferTemplateApply("create")
-	if err != nil {
-		logger.Errorf("Error in create template during ContainerCreateFromImage, continuing to unmount: %s", err)
-		return err
-	}
-
-	tryUndo = false
-
-	logger.Debugf("Created LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
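-// lvmContainerDeleteInternal unmounts a container or snapshot, removes its
-// logical volume if present and deletes its mountpoint and symlinks.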
-func lvmContainerDeleteInternal(projectName, poolName string, ctName string, isSnapshot bool, vgName string, ctPath string) error {
-	containerMntPoint := ""
-	containerLvmName := containerNameToLVName(ctName)
-	if isSnapshot {
-		containerMntPoint = driver.GetSnapshotMountPoint(projectName, poolName, ctName)
-	} else {
-		containerMntPoint = driver.GetContainerMountPoint(projectName, poolName, ctName)
-	}
-
-	if shared.IsMountPoint(containerMntPoint) {
-		err := storageDrivers.TryUnmount(containerMntPoint, 0)
-		if err != nil {
-			return fmt.Errorf(`Failed to unmount container path `+
-				`"%s": %s`, containerMntPoint, err)
-		}
-	}
-
-	containerLvmDevPath := getLvmDevPath(projectName, vgName,
-		storagePoolVolumeAPIEndpointContainers, containerLvmName)
-
-	lvExists, _ := storageLVExists(containerLvmDevPath)
-	if lvExists {
-		err := removeLV(projectName, vgName, storagePoolVolumeAPIEndpointContainers, containerLvmName)
-		if err != nil {
-			return err
-		}
-	}
-
-	var err error
-	if isSnapshot {
-		sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(ctName)
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", poolName, "containers-snapshots", project.Prefix(projectName, sourceName))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceName))
-		err = deleteSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-	} else {
-		err = deleteContainerMountpoint(containerMntPoint, ctPath, "lvm")
-	}
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageLvm) ContainerDelete(container instance.Instance) error {
-	logger.Debugf("Deleting LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	containerName := container.Name()
-	poolName := s.getOnDiskPoolName()
-	err := lvmContainerDeleteInternal(container.Project(), s.pool.Name, containerName, container.IsSnapshot(), poolName, container.Path())
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Deleted LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageLvm) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
-	logger.Debugf("Copying LVM container storage for container %s to %s", source.Name(), target.Name())
-
-	err := s.doContainerCopy(target, source, containerOnly, false, nil)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Copied LVM container storage for container %s to %s", source.Name(), target.Name())
-	return nil
-}
-
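-// doContainerCopy copies a container, and optionally its snapshots, to the
-// target. When copying across pools it initializes storage for the source
-// pool; when refreshing it only copies the supplied snapshot list.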
-func (s *storageLvm) doContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
-	ourStart, err := source.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer source.StorageStop()
-	}
-
-	sourcePool, err := source.StoragePool()
-	if err != nil {
-		return err
-	}
-	targetPool, err := target.StoragePool()
-	if err != nil {
-		return err
-	}
-	srcState := s.s
-	if sourcePool != targetPool {
-		// Setup storage for the source volume.
-		srcStorage, err := storagePoolVolumeInit(s.s, "default", sourcePool, source.Name(), storagePoolVolumeTypeContainer)
-		if err != nil {
-			return err
-		}
-
-		ourMount, err := srcStorage.StoragePoolMount()
-		if err != nil {
-			return err
-		}
-		if ourMount {
-			defer srcStorage.StoragePoolUmount()
-		}
-		srcState = srcStorage.GetState()
-	}
-
-	err = s.copyContainer(target, source, refresh)
-	if err != nil {
-		return err
-	}
-
-	if containerOnly {
-		return nil
-	}
-
-	var snapshots []instance.Instance
-
-	if refresh {
-		snapshots = refreshSnapshots
-	} else {
-		snapshots, err = source.Snapshots()
-		if err != nil {
-			return err
-		}
-	}
-
-	if len(snapshots) == 0 {
-		return nil
-	}
-
-	for _, snap := range snapshots {
-		_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-		newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
-
-		logger.Debugf("Copying LVM container storage for snapshot %s to %s", snap.Name(), newSnapName)
-
-		sourceSnapshot, err := instance.LoadByProjectAndName(srcState, source.Project(), snap.Name())
-		if err != nil {
-			return err
-		}
-
-		targetSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), newSnapName)
-		if err != nil {
-			return err
-		}
-
-		err = s.copySnapshot(targetSnapshot, sourceSnapshot, refresh)
-		if err != nil {
-			return err
-		}
-
-		logger.Debugf("Copied LVM container storage for snapshot %s to %s", snap.Name(), newSnapName)
-	}
-
-	return nil
-}
-
-func (s *storageLvm) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
-	logger.Debugf("Refreshing LVM container storage for %s from %s", target.Name(), source.Name())
-
-	err := s.doContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Refreshed LVM container storage for %s from %s", target.Name(), source.Name())
-	return nil
-}
-
-func (s *storageLvm) ContainerMount(c instance.Instance) (bool, error) {
-	return s.doContainerMount(c.Project(), c.Name(), false)
-}
-
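-// doContainerMount mounts the container's logical volume onto its
-// mountpoint, adding the "nouuid" option for XFS snapshot mounts, and
-// serializes concurrent mount attempts via the ongoing-operation map.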
-func (s *storageLvm) doContainerMount(project, name string, snap bool) (bool, error) {
-	logger.Debugf("Mounting LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	containerLvmName := containerNameToLVName(name)
-	lvFsType := s.getLvmFilesystem()
-	poolName := s.getOnDiskPoolName()
-	containerLvmPath := getLvmDevPath(project, poolName, storagePoolVolumeAPIEndpointContainers, containerLvmName)
-	containerMntPoint := driver.GetContainerMountPoint(project, s.pool.Name, name)
-	if shared.IsSnapshot(name) {
-		containerMntPoint = driver.GetSnapshotMountPoint(project, s.pool.Name, name)
-	}
-
-	containerMountLockID := getContainerMountLockID(s.pool.Name, name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in mounting the storage volume.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[containerMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var mounterr error
-	ourMount := false
-	if !shared.IsMountPoint(containerMntPoint) {
-		mountFlags, mountOptions := resolveMountOptions(s.getLvmMountOptions())
-		if snap && lvFsType == "xfs" {
-			idx := strings.Index(mountOptions, "nouuid")
-			if idx < 0 {
-				mountOptions += ",nouuid"
-			}
-		}
-
-		mounterr = storageDrivers.TryMount(containerLvmPath, containerMntPoint, lvFsType, mountFlags, mountOptions)
-		ourMount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerMountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, containerMountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if mounterr != nil {
-		return false, errors.Wrapf(mounterr, "Mount %s onto %s", containerLvmPath, containerMntPoint)
-	}
-
-	logger.Debugf("Mounted LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourMount, nil
-}
-
-func (s *storageLvm) ContainerUmount(c instance.Instance, path string) (bool, error) {
-	return s.umount(c.Project(), c.Name(), path)
-}
-
-func (s *storageLvm) umount(project, name string, path string) (bool, error) {
-	logger.Debugf("Unmounting LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	containerMntPoint := driver.GetContainerMountPoint(project, s.pool.Name, name)
-	if shared.IsSnapshot(name) {
-		containerMntPoint = driver.GetSnapshotMountPoint(project, s.pool.Name, name)
-	}
-
-	containerUmountLockID := getContainerUmountLockID(s.pool.Name, name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerUmountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in unmounting the storage volume.
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[containerUmountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var imgerr error
-	ourUmount := false
-	if shared.IsMountPoint(containerMntPoint) {
-		imgerr = storageDrivers.TryUnmount(containerMntPoint, 0)
-		ourUmount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerUmountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, containerUmountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if imgerr != nil {
-		return false, imgerr
-	}
-
-	logger.Debugf("Unmounted LVM storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourUmount, nil
-}
-
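-// ContainerRename renames the container's logical volume, recursively
-// renames its snapshots and moves the mountpoints and symlinks to match
-// the new name.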
-func (s *storageLvm) ContainerRename(container instance.Instance, newContainerName string) error {
-	logger.Debugf("Renaming LVM storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newContainerName)
-
-	tryUndo := true
-
-	oldName := container.Name()
-	oldLvmName := containerNameToLVName(oldName)
-	newLvmName := containerNameToLVName(newContainerName)
-
-	_, err := s.ContainerUmount(container, container.Path())
-	if err != nil {
-		return err
-	}
-
-	err = s.renameLVByPath(container.Project(), oldLvmName, newLvmName, storagePoolVolumeAPIEndpointContainers)
-	if err != nil {
-		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldLvmName, newLvmName, err)
-	}
-	defer func() {
-		if tryUndo {
-			s.renameLVByPath(container.Project(), newLvmName, oldLvmName, storagePoolVolumeAPIEndpointContainers)
-		}
-	}()
-
-	// MAYBE(FIXME(brauner)): Register another cleanup function that tries to
-	// rename already renamed snapshots back to their old name when the
-	// rename fails.
-	if !container.IsSnapshot() {
-		snaps, err := container.Snapshots()
-		if err != nil {
-			return err
-		}
-
-		for _, snap := range snaps {
-			baseSnapName := filepath.Base(snap.Name())
-			newSnapshotName := newContainerName + shared.SnapshotDelimiter + baseSnapName
-			err := s.ContainerRename(snap, newSnapshotName)
-			if err != nil {
-				return err
-			}
-		}
-
-		oldContainerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, oldName)
-		oldContainerMntPointSymlink := container.Path()
-		newContainerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, newContainerName)
-		newContainerMntPointSymlink := shared.VarPath("containers", project.Prefix(container.Project(), newContainerName))
-		err = renameContainerMountpoint(oldContainerMntPoint, oldContainerMntPointSymlink, newContainerMntPoint, newContainerMntPointSymlink)
-		if err != nil {
-			return err
-		}
-
-		oldSnapshotPath := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, oldName)
-		newSnapshotPath := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, newContainerName)
-		if shared.PathExists(oldSnapshotPath) {
-			err = os.Rename(oldSnapshotPath, newSnapshotPath)
-			if err != nil {
-				return err
-			}
-		}
-
-		oldSnapshotSymlink := shared.VarPath("snapshots", project.Prefix(container.Project(), oldName))
-		newSnapshotSymlink := shared.VarPath("snapshots", project.Prefix(container.Project(), newContainerName))
-		if shared.PathExists(oldSnapshotSymlink) {
-			err := os.Remove(oldSnapshotSymlink)
-			if err != nil {
-				return err
-			}
-
-			err = os.Symlink(newSnapshotPath, newSnapshotSymlink)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	tryUndo = false
-
-	logger.Debugf("Renamed LVM storage volume for container \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newContainerName)
-	return nil
-}
-
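-// ContainerRestore restores a container from one of its snapshots. On a
-// thinpool the target LV is replaced with a new snapshot of the source LV;
-// otherwise the snapshot's filesystem is rsynced over the target.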
-func (s *storageLvm) ContainerRestore(target instance.Instance, source instance.Instance) error {
-	logger.Debugf("Restoring LVM storage volume for container \"%s\" from %s to %s", s.volume.Name, source.Name(), target.Name())
-
-	if source.Type() != instancetype.Container {
-		return fmt.Errorf("Source instance type must be container")
-	}
-
-	if target.Type() != instancetype.Container {
-		return fmt.Errorf("Target instance type must be container")
-	}
-
-	srcCt := source.(*containerLXC)
-	targetCt := target.(*containerLXC)
-
-	_, sourcePool, _ := srcCt.Storage().GetContainerPoolInfo()
-	if s.pool.Name != sourcePool {
-		return fmt.Errorf("Containers must be on the same pool to be restored")
-	}
-
-	sourceName := source.Name()
-	sourceLvmName := containerNameToLVName(sourceName)
-
-	targetName := target.Name()
-	targetLvmName := containerNameToLVName(targetName)
-	targetPath := target.Path()
-	if s.useThinpool {
-		ourUmount, err := targetCt.Storage().ContainerUmount(target, targetPath)
-		if err != nil {
-			return err
-		}
-		if ourUmount {
-			defer targetCt.Storage().ContainerMount(target)
-		}
-
-		poolName := s.getOnDiskPoolName()
-
-		err = removeLV(target.Project(), poolName,
-			storagePoolVolumeAPIEndpointContainers, targetLvmName)
-		if err != nil {
-			logger.Errorf("Failed to remove \"%s\": %s",
-				targetLvmName, err)
-		}
-
-		_, err = s.createSnapshotLV(source.Project(), poolName, sourceLvmName,
-			storagePoolVolumeAPIEndpointContainers, targetLvmName,
-			storagePoolVolumeAPIEndpointContainers, false, true)
-		if err != nil {
-			return fmt.Errorf("Error creating snapshot LV: %v", err)
-		}
-	} else {
-		ourStart, err := source.StorageStart()
-		if err != nil {
-			return err
-		}
-		if ourStart {
-			defer source.StorageStop()
-		}
-
-		ourStart, err = target.StorageStart()
-		if err != nil {
-			return err
-		}
-		if ourStart {
-			defer target.StorageStop()
-		}
-
-		poolName := s.getOnDiskPoolName()
-		sourceName := source.Name()
-		targetContainerMntPoint := driver.GetContainerMountPoint(target.Project(), poolName, targetName)
-		sourceContainerMntPoint := driver.GetContainerMountPoint(target.Project(), poolName, sourceName)
-		if source.IsSnapshot() {
-			sourceContainerMntPoint = driver.GetSnapshotMountPoint(target.Project(), poolName, sourceName)
-		}
-
-		err = target.Freeze()
-		if err != nil {
-			return err
-		}
-		defer target.Unfreeze()
-
-		bwlimit := s.pool.Config["rsync.bwlimit"]
-		output, err := rsync.LocalCopy(sourceContainerMntPoint, targetContainerMntPoint, bwlimit, true)
-		if err != nil {
-			return fmt.Errorf("Failed to rsync container: %s: %s", string(output), err)
-		}
-	}
-
-	logger.Debugf("Restored LVM storage volume for container \"%s\" from %s to %s", s.volume.Name, sourceName, targetName)
-	return nil
-}
-
-func (s *storageLvm) ContainerGetUsage(container instance.Instance) (int64, error) {
-	return -1, fmt.Errorf("the LVM container backend doesn't support quotas")
-}
-
-func (s *storageLvm) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
-	logger.Debugf("Creating LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	err := s.createSnapshotContainer(snapshotContainer, sourceContainer, true)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Created LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageLvm) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
-	logger.Debugf("Deleting LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	err := s.ContainerDelete(snapshotContainer)
-	if err != nil {
-		return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err)
-	}
-
-	logger.Debugf("Deleted LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageLvm) ContainerSnapshotRename(snapshotContainer instance.Instance, newContainerName string) error {
-	logger.Debugf("Renaming LVM storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newContainerName)
-
-	tryUndo := true
-
-	oldName := snapshotContainer.Name()
-	oldLvmName := containerNameToLVName(oldName)
-	newLvmName := containerNameToLVName(newContainerName)
-
-	err := s.renameLVByPath(snapshotContainer.Project(), oldLvmName, newLvmName, storagePoolVolumeAPIEndpointContainers)
-	if err != nil {
-		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldLvmName, newLvmName, err)
-	}
-	defer func() {
-		if tryUndo {
-			s.renameLVByPath(snapshotContainer.Project(), newLvmName, oldLvmName, storagePoolVolumeAPIEndpointContainers)
-		}
-	}()
-
-	oldSnapshotMntPoint := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, oldName)
-	newSnapshotMntPoint := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name, newContainerName)
-	err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
-	if err != nil {
-		return err
-	}
-
-	tryUndo = false
-
-	logger.Debugf("Renamed LVM storage volume for snapshot \"%s\" from %s to %s", s.volume.Name, s.volume.Name, newContainerName)
-	return nil
-}
-
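-// ContainerSnapshotStart makes the snapshot's logical volume writable if
-// necessary and mounts it, returning true when ContainerSnapshotStop needs
-// to undo these changes later.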
-func (s *storageLvm) ContainerSnapshotStart(container instance.Instance) (bool, error) {
-	logger.Debugf(`Initializing LVM storage volume for snapshot "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-
-	poolName := s.getOnDiskPoolName()
-	containerName := container.Name()
-	containerLvmName := containerNameToLVName(containerName)
-	containerLvmPath := getLvmDevPath(container.Project(), poolName, storagePoolVolumeAPIEndpointContainers, containerLvmName)
-
-	wasWritableAtCheck, err := lvmLvIsWritable(containerLvmPath)
-	if err != nil {
-		return false, err
-	}
-
-	if !wasWritableAtCheck {
-		_, err := shared.TryRunCommand("lvchange", "-prw", fmt.Sprintf("%s/%s_%s", poolName, storagePoolVolumeAPIEndpointContainers, project.Prefix(container.Project(), containerLvmName)))
-		if err != nil {
-			logger.Errorf("Failed to make LVM snapshot \"%s\" read-write: %v", containerName, err)
-			return false, err
-		}
-	}
-
-	lvFsType := s.getLvmFilesystem()
-	containerMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, containerName)
-	if !shared.IsMountPoint(containerMntPoint) {
-		mntOptString := s.getLvmMountOptions()
-		mountFlags, mountOptions := resolveMountOptions(mntOptString)
-
-		if lvFsType == "xfs" {
-			idx := strings.Index(mountOptions, "nouuid")
-			if idx < 0 {
-				mountOptions += ",nouuid"
-			}
-		}
-
-		err = storageDrivers.TryMount(containerLvmPath, containerMntPoint, lvFsType, mountFlags, mountOptions)
-		if err != nil {
-			logger.Errorf(`Failed to mount LVM snapshot "%s" with filesystem "%s" options "%s" onto "%s": %s`, s.volume.Name, lvFsType, mntOptString, containerMntPoint, err)
-			return false, err
-		}
-	}
-
-	logger.Debugf(`Initialized LVM storage volume for snapshot "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-
-	if wasWritableAtCheck {
-		return false, nil
-	}
-
-	return true, nil
-}
-
-func (s *storageLvm) ContainerSnapshotStop(container instance.Instance) (bool, error) {
-	logger.Debugf("Stopping LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	containerName := container.Name()
-	snapshotMntPoint := driver.GetSnapshotMountPoint(container.Project(), s.pool.Name, containerName)
-
-	poolName := s.getOnDiskPoolName()
-
-	if shared.IsMountPoint(snapshotMntPoint) {
-		err := storageDrivers.TryUnmount(snapshotMntPoint, 0)
-		if err != nil {
-			return false, err
-		}
-	}
-
-	containerLvmPath := getLvmDevPath(container.Project(), poolName, storagePoolVolumeAPIEndpointContainers, containerNameToLVName(containerName))
-	wasWritableAtCheck, err := lvmLvIsWritable(containerLvmPath)
-	if err != nil {
-		return false, err
-	}
-
-	if wasWritableAtCheck {
-		containerLvmName := containerNameToLVName(project.Prefix(container.Project(), containerName))
-		_, err := shared.TryRunCommand("lvchange", "-pr", fmt.Sprintf("%s/%s_%s", poolName, storagePoolVolumeAPIEndpointContainers, containerLvmName))
-		if err != nil {
-			logger.Errorf("Failed to make LVM snapshot read-only: %v", err)
-			return false, err
-		}
-	}
-
-	logger.Debugf("Stopped LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	if wasWritableAtCheck {
-		return false, nil
-	}
-
-	return true, nil
-}
-
-func (s *storageLvm) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
-	logger.Debugf("Creating empty LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	err := s.ContainerCreate(snapshotContainer)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf("Created empty LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
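-// ContainerBackupCreate exports a container into a backup directory,
-// rsyncing each snapshot unless the backup is instance-only, then rsyncing
-// the container itself from a temporary snapshot LV for consistency.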
-func (s *storageLvm) ContainerBackupCreate(path string, backup backup.Backup, source instance.Instance) error {
-	poolName := s.getOnDiskPoolName()
-
-	// Prepare for rsync
-	rsync := func(oldPath string, newPath string, bwlimit string) error {
-		output, err := rsync.LocalCopy(oldPath, newPath, bwlimit, true)
-		if err != nil {
-			return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
-		}
-
-		return nil
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-
-	// Handle snapshots
-	if !backup.InstanceOnly() {
-		snapshotsPath := fmt.Sprintf("%s/snapshots", path)
-
-		// Retrieve the snapshots
-		snapshots, err := source.Snapshots()
-		if err != nil {
-			return err
-		}
-
-		// Create the snapshot path
-		if len(snapshots) > 0 {
-			err = os.MkdirAll(snapshotsPath, 0711)
-			if err != nil {
-				return err
-			}
-		}
-
-		for _, snap := range snapshots {
-			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-			snapshotMntPoint := driver.GetSnapshotMountPoint(snap.Project(), s.pool.Name, snap.Name())
-			target := fmt.Sprintf("%s/%s", snapshotsPath, snapName)
-
-			// Mount the snapshot
-			_, err := s.ContainerSnapshotStart(snap)
-			if err != nil {
-				return err
-			}
-
-			// Copy the snapshot
-			err = rsync(snapshotMntPoint, target, bwlimit)
-			s.ContainerSnapshotStop(snap)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Make a temporary snapshot of the container
-	sourceLvmDatasetSnapshot := fmt.Sprintf("snapshot-%s", uuid.NewRandom().String())
-	tmpContainerMntPoint := driver.GetContainerMountPoint(source.Project(), s.pool.Name, sourceLvmDatasetSnapshot)
-	err := os.MkdirAll(tmpContainerMntPoint, 0100)
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpContainerMntPoint)
-
-	_, err = s.createSnapshotLV(source.Project(), poolName, containerNameToLVName(source.Name()),
-		storagePoolVolumeAPIEndpointContainers, containerNameToLVName(sourceLvmDatasetSnapshot),
-		storagePoolVolumeAPIEndpointContainers, false, s.useThinpool)
-	if err != nil {
-		return err
-	}
-	defer removeLV(source.Project(), poolName, storagePoolVolumeAPIEndpointContainers,
-		containerNameToLVName(sourceLvmDatasetSnapshot))
-
-	// Mount the temporary snapshot
-	_, err = s.doContainerMount(source.Project(), sourceLvmDatasetSnapshot, true)
-	if err != nil {
-		return err
-	}
-
-	// Copy the container
-	containerPath := fmt.Sprintf("%s/container", path)
-	err = rsync(tmpContainerMntPoint, containerPath, bwlimit)
-	s.umount(source.Project(), sourceLvmDatasetSnapshot, "")
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageLvm) ContainerBackupLoad(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	containerPath, err := s.doContainerBackupLoad(info.Project, info.Name, info.Privileged, false)
-	if err != nil {
-		return err
-	}
-
-	// Prepare tar arguments
-	args := append(tarArgs, []string{
-		"-",
-		"--strip-components=2",
-		"--xattrs-include=*",
-		"-C", containerPath, "backup/container",
-	}...)
-
-	// Extract container
-	data.Seek(0, 0)
-	err = shared.RunCommandWithFds(data, nil, "tar", args...)
-	if err != nil {
-		return err
-	}
-
-	for _, snap := range info.Snapshots {
-		containerPath, err := s.doContainerBackupLoad(info.Project, fmt.Sprintf("%s/%s", info.Name, snap),
-			info.Privileged, true)
-		if err != nil {
-			return err
-		}
-
-		// Prepare tar arguments
-		args := append(tarArgs, []string{
-			"-",
-			"--strip-components=3",
-			"--xattrs-include=*",
-			"-C", containerPath, fmt.Sprintf("backup/snapshots/%s", snap),
-		}...)
-
-		// Extract snapshots
-		data.Seek(0, 0)
-		err = shared.RunCommandWithFds(data, nil, "tar", args...)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
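-// doContainerBackupLoad creates and mounts the logical volume and
-// mountpoint for a container or snapshot being restored from a backup and
-// returns the path the tarball should be extracted into.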
-func (s *storageLvm) doContainerBackupLoad(projectName, containerName string, privileged bool,
-	snapshot bool) (string, error) {
-	tryUndo := true
-
-	var containerPath string
-	if snapshot {
-		containerPath = shared.VarPath("snapshots", project.Prefix(projectName, containerName))
-	} else {
-		containerPath = shared.VarPath("containers", project.Prefix(projectName, containerName))
-	}
-	containerLvmName := containerNameToLVName(containerName)
-	thinPoolName := s.getLvmThinpoolName()
-	lvFsType := s.getLvmFilesystem()
-	lvSize, err := s.getLvmVolumeSize()
-	if lvSize == "" {
-		return "", err
-	}
-
-	poolName := s.getOnDiskPoolName()
-	if s.useThinpool {
-		err = lvmCreateThinpool(s.s, s.sTypeVersion, poolName, thinPoolName, lvFsType)
-		if err != nil {
-			return "", err
-		}
-	}
-
-	if !snapshot {
-		err = lvmCreateLv(projectName, poolName, thinPoolName, containerLvmName, lvFsType, lvSize,
-			storagePoolVolumeAPIEndpointContainers, s.useThinpool)
-	} else {
-		cname, _, _ := shared.InstanceGetParentAndSnapshotName(containerName)
-		_, err = s.createSnapshotLV(projectName, poolName, containerNameToLVName(cname), storagePoolVolumeAPIEndpointContainers,
-			containerLvmName, storagePoolVolumeAPIEndpointContainers, false, s.useThinpool)
-	}
-	if err != nil {
-		return "", err
-	}
-
-	defer func() {
-		if tryUndo {
-			lvmContainerDeleteInternal(projectName, s.pool.Name, containerName, false, poolName,
-				containerPath)
-		}
-	}()
-
-	var containerMntPoint string
-	if snapshot {
-		containerMntPoint = driver.GetSnapshotMountPoint(projectName, s.pool.Name, containerName)
-	} else {
-		containerMntPoint = driver.GetContainerMountPoint(projectName, s.pool.Name, containerName)
-	}
-	err = os.MkdirAll(containerMntPoint, 0711)
-	if err != nil {
-		return "", err
-	}
-
-	if snapshot {
-		cname, _, _ := shared.InstanceGetParentAndSnapshotName(containerName)
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, cname))
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(projectName, cname))
-		err = driver.CreateSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget,
-			snapshotMntPointSymlink)
-	} else {
-		err = driver.CreateContainerMountpoint(containerMntPoint, containerPath, privileged)
-	}
-	if err != nil {
-		return "", err
-	}
-
-	_, err = s.doContainerMount(projectName, containerName, false)
-	if err != nil {
-		return "", err
-	}
-
-	tryUndo = false
-
-	return containerPath, nil
-}
-
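-// ImageCreate records the image volume in the database and, when a
-// thinpool is used, also creates an image LV and unpacks the image into it
-// so containers can later be created as thin snapshots of it.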
-func (s *storageLvm) ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	logger.Debugf("Creating LVM storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	tryUndo := true
-	trySubUndo := true
-
-	poolName := s.getOnDiskPoolName()
-	thinPoolName := s.getLvmThinpoolName()
-	lvFsType := s.getLvmFilesystem()
-	lvSize, err := s.getLvmVolumeSize()
-	if lvSize == "" {
-		return err
-	}
-
-	err = s.createImageDbPoolVolume(fingerprint)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if !trySubUndo {
-			return
-		}
-		err := s.deleteImageDbPoolVolume(fingerprint)
-		if err != nil {
-			logger.Warnf("Could not delete image \"%s\" from storage volume database, manual intervention needed", fingerprint)
-		}
-	}()
-
-	if s.useThinpool {
-		err = lvmCreateThinpool(s.s, s.sTypeVersion, poolName, thinPoolName, lvFsType)
-		if err != nil {
-			return err
-		}
-
-		err = lvmCreateLv("default", poolName, thinPoolName, fingerprint, lvFsType, lvSize, storagePoolVolumeAPIEndpointImages, true)
-		if err != nil {
-			return fmt.Errorf("Error Creating LVM LV for new image: %v", err)
-		}
-		defer func() {
-			if tryUndo {
-				s.ImageDelete(fingerprint)
-			}
-		}()
-	}
-	trySubUndo = false
-
-	// Create image mountpoint.
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if !shared.PathExists(imageMntPoint) {
-		err := os.MkdirAll(imageMntPoint, 0700)
-		if err != nil {
-			return err
-		}
-	}
-
-	if s.useThinpool {
-		_, err = s.ImageMount(fingerprint)
-		if err != nil {
-			return err
-		}
-
-		imagePath := shared.VarPath("images", fingerprint)
-		err = driver.ImageUnpack(imagePath, imageMntPoint, "", true, s.s.OS.RunningInUserNS, nil)
-		if err != nil {
-			return err
-		}
-
-		s.ImageUmount(fingerprint)
-	}
-
-	tryUndo = false
-
-	logger.Debugf("Created LVM storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return nil
-}
-
-func (s *storageLvm) ImageDelete(fingerprint string) error {
-	logger.Debugf("Deleting LVM storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	if s.useThinpool {
-		poolName := s.getOnDiskPoolName()
-		imageLvmDevPath := getLvmDevPath("default", poolName,
-			storagePoolVolumeAPIEndpointImages, fingerprint)
-		lvExists, _ := storageLVExists(imageLvmDevPath)
-
-		if lvExists {
-			_, err := s.ImageUmount(fingerprint)
-			if err != nil {
-				return err
-			}
-
-			err = removeLV("default", poolName, storagePoolVolumeAPIEndpointImages, fingerprint)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	err := s.deleteImageDbPoolVolume(fingerprint)
-	if err != nil {
-		return err
-	}
-
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if shared.PathExists(imageMntPoint) {
-		err := os.Remove(imageMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf("Deleted LVM storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return nil
-}
-
-func (s *storageLvm) ImageMount(fingerprint string) (bool, error) {
-	logger.Debugf("Mounting LVM storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if shared.IsMountPoint(imageMntPoint) {
-		return false, nil
-	}
-
-	lvmFstype := s.getLvmFilesystem()
-	if lvmFstype == "" {
-		// Shouldn't happen.
-		return false, fmt.Errorf("no filesystem type specified")
-	}
-
-	poolName := s.getOnDiskPoolName()
-	lvmVolumePath := getLvmDevPath("default", poolName, storagePoolVolumeAPIEndpointImages, fingerprint)
-	mountFlags, mountOptions := resolveMountOptions(s.getLvmMountOptions())
-	err := storageDrivers.TryMount(lvmVolumePath, imageMntPoint, lvmFstype, mountFlags, mountOptions)
-	if err != nil {
-		logger.Errorf("Error mounting image LV for unpacking: %s", err)
-		return false, fmt.Errorf("Error mounting image LV: %v", err)
-	}
-
-	logger.Debugf("Mounted LVM storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageLvm) ImageUmount(fingerprint string) (bool, error) {
-	logger.Debugf("Unmounting LVM storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if !shared.IsMountPoint(imageMntPoint) {
-		return false, nil
-	}
-
-	err := storageDrivers.TryUnmount(imageMntPoint, 0)
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Unmounted LVM storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return true, nil
-}
-
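-// Migration of LVM volumes is always performed via rsync, which does not
-// preserve inodes.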
-func (s *storageLvm) MigrationType() migration.MigrationFSType {
-	return migration.MigrationFSType_RSYNC
-}
-
-func (s *storageLvm) PreservesInodes() bool {
-	return false
-}
-
-func (s *storageLvm) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return rsyncMigrationSource(args)
-}
-
-func (s *storageLvm) MigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	return rsyncMigrationSink(conn, op, args)
-}
-
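-// StorageEntitySetQuota resizes the logical volume backing a container or
-// custom volume, shrinking or growing both the LV and its filesystem, and
-// records the new size in the database.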
-func (s *storageLvm) StorageEntitySetQuota(volumeType int, size int64, data interface{}) error {
-	logger.Debugf(`Setting LVM quota for "%s"`, s.volume.Name)
-
-	if !shared.IntInSlice(volumeType, supportedVolumeTypes) {
-		return fmt.Errorf("Invalid storage type")
-	}
-
-	poolName := s.getOnDiskPoolName()
-	var c instance.Instance
-	fsType := s.getLvmFilesystem()
-	lvDevPath := ""
-	mountpoint := ""
-	switch volumeType {
-	case storagePoolVolumeTypeContainer:
-		c = data.(instance.Instance)
-		ctName := c.Name()
-		if c.IsRunning() {
-			msg := fmt.Sprintf(`Cannot resize LVM storage volume `+
-				`for container "%s" when it is running`,
-				ctName)
-			logger.Errorf(msg)
-			return fmt.Errorf(msg)
-		}
-
-		ctLvmName := containerNameToLVName(ctName)
-		lvDevPath = getLvmDevPath("default", poolName, storagePoolVolumeAPIEndpointContainers, ctLvmName)
-		mountpoint = driver.GetContainerMountPoint(c.Project(), s.pool.Name, ctName)
-	default:
-		customLvmName := containerNameToLVName(s.volume.Name)
-		lvDevPath = getLvmDevPath("default", poolName, storagePoolVolumeAPIEndpointCustom, customLvmName)
-		mountpoint = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	}
-
-	oldSize, err := units.ParseByteSizeString(s.volume.Config["size"])
-	if err != nil {
-		return err
-	}
-
-	// A requested size of 0 means someone unset the size property in
-	// the volume's config; we obviously cannot resize to 0.
-	if oldSize == size || size == 0 {
-		return nil
-	}
-
-	if size < oldSize {
-		err = s.lvReduce(lvDevPath, size, fsType, mountpoint, volumeType, data)
-	} else if size > oldSize {
-		err = s.lvExtend(lvDevPath, size, fsType, mountpoint, volumeType, data)
-	}
-	if err != nil {
-		return err
-	}
-
-	// Update the database
-	s.volume.Config["size"] = units.GetByteSizeString(size, 0)
-	err = s.s.Cluster.StoragePoolVolumeUpdateByProject(
-		"default",
-		s.volume.Name,
-		volumeType,
-		s.poolID,
-		s.volume.Description,
-		s.volume.Config)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf(`Set LVM quota for "%s"`, s.volume.Name)
-	return nil
-}
-
-func (s *storageLvm) StoragePoolResources() (*api.ResourcesStoragePool, error) {
-	res := api.ResourcesStoragePool{}
-
-	// Thinpools will always report zero free space on the volume group, so calculate approx
-	// used space using the thinpool logical volume allocated (data and meta) percentages.
-	if s.useThinpool {
-		args := []string{fmt.Sprintf("%s/%s", s.vgName, s.thinPoolName), "--noheadings",
-			"--units", "b", "--nosuffix", "--separator", ",", "-o", "lv_size,data_percent,metadata_percent"}
-
-		out, err := shared.TryRunCommand("lvs", args...)
-		if err != nil {
-			return nil, err
-		}
-
-		parts := strings.Split(strings.TrimSpace(out), ",")
-		if len(parts) < 3 {
-			return nil, fmt.Errorf("Unexpected output from lvs command")
-		}
-
-		total, err := strconv.ParseUint(parts[0], 10, 64)
-		if err != nil {
-			return nil, err
-		}
-
-		res.Space.Total = total
-
-		dataPerc, err := strconv.ParseFloat(parts[1], 64)
-		if err != nil {
-			return nil, err
-		}
-
-		metaPerc, err := strconv.ParseFloat(parts[2], 64)
-		if err != nil {
-			return nil, err
-		}
-
-		res.Space.Used = uint64(float64(total) * ((dataPerc + metaPerc) / 100))
-	} else {
-		// If thinpools are not in use, calculate used space in volume group.
-		args := []string{s.vgName, "--noheadings",
-			"--units", "b", "--nosuffix", "--separator", ",", "-o", "vg_size,vg_free"}
-
-		out, err := shared.TryRunCommand("vgs", args...)
-		if err != nil {
-			return nil, err
-		}
-
-		parts := strings.Split(strings.TrimSpace(out), ",")
-		if len(parts) < 2 {
-			return nil, fmt.Errorf("Unexpected output from vgs command")
-		}
-
-		total, err := strconv.ParseUint(parts[0], 10, 64)
-		if err != nil {
-			return nil, err
-		}
-
-		res.Space.Total = total
-
-		free, err := strconv.ParseUint(parts[1], 10, 64)
-		if err != nil {
-			return nil, err
-		}
-		res.Space.Used = total - free
-	}
-
-	return &res, nil
-}
-
-func (s *storageLvm) StoragePoolVolumeCopy(source *api.StorageVolumeSource) error {
-	logger.Infof("Copying LVM storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-	successMsg := fmt.Sprintf("Copied LVM storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-
-	if s.pool.Name != source.Pool {
-		// Cross-pool copy: set up storage for the source volume.
-		srcStorage, err := storagePoolVolumeInit(s.s, "default", source.Pool, source.Name, storagePoolVolumeTypeCustom)
-		if err != nil {
-			return err
-		}
-
-		ourMount, err := srcStorage.StoragePoolMount()
-		if err != nil {
-			return err
-		}
-		if ourMount {
-			defer srcStorage.StoragePoolUmount()
-		}
-	}
-
-	err := s.copyVolume(source.Pool, source.Name)
-	if err != nil {
-		return err
-	}
-
-	if source.VolumeOnly {
-		logger.Infof(successMsg)
-		return nil
-	}
-
-	snapshots, err := driver.VolumeSnapshotsGet(s.s, source.Pool, source.Name, storagePoolVolumeTypeCustom)
-	if err != nil {
-		return err
-	}
-
-	if len(snapshots) == 0 {
-		return nil
-	}
-
-	for _, snap := range snapshots {
-		err = s.copyVolumeSnapshot(source.Pool, snap.Name)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Infof(successMsg)
-	return nil
-}
-
-func (s *storageLvm) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return rsyncStorageMigrationSource(args)
-}
-
-func (s *storageLvm) StorageMigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	return rsyncStorageMigrationSink(conn, op, args)
-}
-
-func (s *storageLvm) StoragePoolVolumeSnapshotCreate(target *api.StorageVolumeSnapshotsPost) error {
-	logger.Debugf("Creating LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	poolName := s.getOnDiskPoolName()
-	sourceOnlyName, _, ok := shared.InstanceGetParentAndSnapshotName(target.Name)
-	if !ok {
-		return fmt.Errorf("Not a snapshot")
-	}
-
-	sourceLvmName := containerNameToLVName(sourceOnlyName)
-	targetLvmName := containerNameToLVName(target.Name)
-
-	_, err := s.createSnapshotLV("default", poolName, sourceLvmName, storagePoolVolumeAPIEndpointCustom, targetLvmName, storagePoolVolumeAPIEndpointCustom, true, s.useThinpool)
-	if err != nil {
-		return fmt.Errorf("Failed to create snapshot logical volume %s", err)
-	}
-
-	targetPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, target.Name)
-	err = os.MkdirAll(targetPath, driver.SnapshotsDirMode)
-	if err != nil {
-		logger.Errorf("Failed to create mountpoint \"%s\" for RBD storage volume \"%s\" on storage pool \"%s\": %s", targetPath, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	logger.Debugf("Created LVM storage volume for snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageLvm) StoragePoolVolumeSnapshotDelete() error {
-	logger.Infof("Deleting LVM storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	snapshotLVName := containerNameToLVName(s.volume.Name)
-	storageVolumeSnapshotPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	if shared.IsMountPoint(storageVolumeSnapshotPath) {
-		err := storageDrivers.TryUnmount(storageVolumeSnapshotPath, 0)
-		if err != nil {
-			return fmt.Errorf("Failed to unmount snapshot path \"%s\": %s", storageVolumeSnapshotPath, err)
-		}
-	}
-
-	poolName := s.getOnDiskPoolName()
-	snapshotLVDevPath := getLvmDevPath("default", poolName, storagePoolVolumeAPIEndpointCustom, snapshotLVName)
-	lvExists, _ := storageLVExists(snapshotLVDevPath)
-	if lvExists {
-		err := removeLV("default", poolName, storagePoolVolumeAPIEndpointCustom, snapshotLVName)
-		if err != nil {
-			return err
-		}
-	}
-
-	err := os.Remove(storageVolumeSnapshotPath)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	storageVolumeSnapshotPath = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, sourceName)
-	empty, err := shared.PathIsEmpty(storageVolumeSnapshotPath)
-	if err == nil && empty {
-		os.RemoveAll(storageVolumeSnapshotPath)
-	}
-
-	err = s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		s.volume.Name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for LVM storage volume "%s" on storage pool "%s"`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	logger.Infof("Deleted LVM storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageLvm) StoragePoolVolumeSnapshotRename(newName string) error {
-	sourceName, _, ok := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	if !ok {
-		return fmt.Errorf("Not a snapshot name")
-	}
-
-	fullSnapshotName := fmt.Sprintf("%s%s%s", sourceName, shared.SnapshotDelimiter, newName)
-
-	logger.Infof("Renaming LVM storage volume on storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, fullSnapshotName)
-
-	_, err := s.StoragePoolVolumeUmount()
-	if err != nil {
-		return err
-	}
-
-	sourceLVName := containerNameToLVName(s.volume.Name)
-	targetLVName := containerNameToLVName(fullSnapshotName)
-
-	err = s.renameLVByPath("default", sourceLVName, targetLVName, storagePoolVolumeAPIEndpointCustom)
-	if err != nil {
-		return fmt.Errorf("Failed to rename logical volume from \"%s\" to \"%s\": %s", s.volume.Name, fullSnapshotName, err)
-	}
-
-	oldPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	newPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, fullSnapshotName)
-	err = os.Rename(oldPath, newPath)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Renamed LVM storage volume on storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, fullSnapshotName)
-
-	return s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, fullSnapshotName, storagePoolVolumeTypeCustom, s.poolID)
-}
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
deleted file mode 100644
index 5448118d3b..0000000000
--- a/lxd/storage_lvm_utils.go
+++ /dev/null
@@ -1,1090 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"os"
-	"os/exec"
-	"strconv"
-	"strings"
-	"syscall"
-
-	"github.com/pkg/errors"
-
-	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/instance/instancetype"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/lxd/rsync"
-	"github.com/lxc/lxd/lxd/state"
-	driver "github.com/lxc/lxd/lxd/storage"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/units"
-	"github.com/lxc/lxd/shared/version"
-)
-
-func (s *storageLvm) lvExtend(lvPath string, lvSize int64, fsType string, fsMntPoint string, volumeType int, data interface{}) error {
-	// Round the size to closest 512 bytes
-	lvSize = int64(lvSize/512) * 512
-	lvSizeString := units.GetByteSizeString(lvSize, 0)
-
-	msg, err := shared.TryRunCommand(
-		"lvextend",
-		"-L", lvSizeString,
-		"-f",
-		lvPath)
-	if err != nil {
-		logger.Errorf("Could not extend LV \"%s\": %s", lvPath, msg)
-		return fmt.Errorf("could not extend LV \"%s\": %s", lvPath, msg)
-	}
-
-	switch volumeType {
-	case storagePoolVolumeTypeContainer:
-		c := data.(instance.Instance)
-		ourMount, err := c.StorageStart()
-		if err != nil {
-			return err
-		}
-		if ourMount {
-			defer c.StorageStop()
-		}
-	case storagePoolVolumeTypeCustom:
-		ourMount, err := s.StoragePoolVolumeMount()
-		if err != nil {
-			return err
-		}
-		if ourMount {
-			defer s.StoragePoolVolumeUmount()
-		}
-	default:
-		return fmt.Errorf(`Resizing not implemented for storage `+
-			`volume type %d`, volumeType)
-	}
-
-	return driver.GrowFileSystem(fsType, lvPath, fsMntPoint)
-}
-
-func (s *storageLvm) lvReduce(lvPath string, lvSize int64, fsType string, fsMntPoint string, volumeType int, data interface{}) error {
-	var err error
-	var msg string
-
-	// Round the size to closest 512 bytes
-	lvSize = int64(lvSize/512) * 512
-	lvSizeString := units.GetByteSizeString(lvSize, 0)
-
-	cleanupFunc, err := shrinkVolumeFilesystem(s, volumeType, fsType, lvPath, fsMntPoint, lvSize, data)
-	if cleanupFunc != nil {
-		defer cleanupFunc()
-	}
-	if err != nil {
-		return err
-	}
-
-	msg, err = shared.TryRunCommand(
-		"lvreduce",
-		"-L", lvSizeString,
-		"-f",
-		lvPath)
-	if err != nil {
-		logger.Errorf("Could not reduce LV \"%s\": %s", lvPath, msg)
-		return fmt.Errorf("could not reduce LV \"%s\": %s", lvPath, msg)
-	}
-
-	logger.Debugf("Reduced underlying %s filesystem for LV \"%s\"", fsType, lvPath)
-	return nil
-}
-
-func (s *storageLvm) getLvmMountOptions() string {
-	if s.volume.Config["block.mount_options"] != "" {
-		return s.volume.Config["block.mount_options"]
-	}
-
-	if s.pool.Config["volume.block.mount_options"] != "" {
-		return s.pool.Config["volume.block.mount_options"]
-	}
-
-	if s.getLvmFilesystem() == "btrfs" {
-		return "user_subvol_rm_allowed,discard"
-	}
-
-	return "discard"
-}
-
-func (s *storageLvm) getLvmFilesystem() string {
-	if s.volume.Config["block.filesystem"] != "" {
-		return s.volume.Config["block.filesystem"]
-	}
-
-	if s.pool.Config["volume.block.filesystem"] != "" {
-		return s.pool.Config["volume.block.filesystem"]
-	}
-
-	return "ext4"
-}
-
-func (s *storageLvm) getLvmVolumeSize() (string, error) {
-	sz, err := units.ParseByteSizeString(s.volume.Config["size"])
-	if err != nil {
-		return "", err
-	}
-
-	// Safety net: Set to default value.
-	if sz == 0 {
-		sz, _ = units.ParseByteSizeString("10GB")
-	}
-
-	return fmt.Sprintf("%d", sz), nil
-}
-
-func (s *storageLvm) getLvmThinpoolName() string {
-	if s.pool.Config["lvm.thinpool_name"] != "" {
-		return s.pool.Config["lvm.thinpool_name"]
-	}
-
-	return "LXDThinPool"
-}
-
-func (s *storageLvm) usesThinpool() bool {
-	// Default is to use a thinpool.
-	if s.pool.Config["lvm.use_thinpool"] == "" {
-		return true
-	}
-
-	return shared.IsTrue(s.pool.Config["lvm.use_thinpool"])
-}
-
-func (s *storageLvm) setLvmThinpoolName(newThinpoolName string) {
-	s.pool.Config["lvm.thinpool_name"] = newThinpoolName
-}
-
-func (s *storageLvm) getOnDiskPoolName() string {
-	if s.vgName != "" {
-		return s.vgName
-	}
-
-	return s.pool.Name
-}
-
-func (s *storageLvm) setOnDiskPoolName(newName string) {
-	s.vgName = newName
-	s.pool.Config["source"] = newName
-}
-
-func (s *storageLvm) renameLVByPath(project, oldName string, newName string, volumeType string) error {
-	oldLvmName := getPrefixedLvName(project, volumeType, oldName)
-	newLvmName := getPrefixedLvName(project, volumeType, newName)
-	poolName := s.getOnDiskPoolName()
-	return lvmLVRename(poolName, oldLvmName, newLvmName)
-}
-
-func removeLV(project, vgName string, volumeType string, lvName string) error {
-	lvmVolumePath := getLvmDevPath(project, vgName, volumeType, lvName)
-
-	_, err := shared.TryRunCommand("lvremove", "-f", lvmVolumePath)
-	if err != nil {
-		logger.Errorf("Could not remove LV \"%s\": %v", lvName, err)
-		return fmt.Errorf("Could not remove LV named %s: %v", lvName, err)
-	}
-
-	return nil
-}
-
-func (s *storageLvm) createSnapshotLV(project, vgName string, origLvName string, origVolumeType string, lvName string, volumeType string, readonly bool, makeThinLv bool) (string, error) {
-	sourceProject := project
-	if origVolumeType == storagePoolVolumeAPIEndpointImages {
-		// Image volumes are shared across projects.
-		sourceProject = "default"
-	}
-
-	sourceLvmVolumePath := getLvmDevPath(sourceProject, vgName, origVolumeType, origLvName)
-	isRecent, err := lvmVersionIsAtLeast(s.sTypeVersion, "2.02.99")
-	if err != nil {
-		return "", fmt.Errorf("Error checking LVM version: %v", err)
-	}
-
-	lvmPoolVolumeName := getPrefixedLvName(project, volumeType, lvName)
-	args := []string{"-n", lvmPoolVolumeName, "-s", sourceLvmVolumePath}
-	if isRecent {
-		args = append(args, "-kn")
-	}
-
-	// If the source is not a thin volume, the size needs to be specified.
-	// According to the LVM tools, 15-20% of the original volume should be
-	// sufficient. However, let's not be stingy at first; otherwise we might
-	// force users to fiddle around with lvextend.
-	if !makeThinLv {
-		lvSize, err := s.getLvmVolumeSize()
-		if lvSize == "" {
-			return "", err
-		}
-
-		// Round the size to closest 512 bytes
-		lvSizeInt, err := units.ParseByteSizeString(lvSize)
-		if err != nil {
-			return "", err
-		}
-
-		lvSizeInt = int64(lvSizeInt/512) * 512
-		lvSizeString := units.GetByteSizeString(lvSizeInt, 0)
-
-		args = append(args, "--size", lvSizeString)
-	}
-
-	if readonly {
-		args = append(args, "-pr")
-	} else {
-		args = append(args, "-prw")
-	}
-
-	_, err = shared.TryRunCommand("lvcreate", args...)
-	if err != nil {
-		logger.Errorf("Could not create LV snapshot: %s to %s: %v", origLvName, lvName, err)
-		return "", fmt.Errorf("Could not create snapshot LV named %s: %v", lvName, err)
-	}
-
-	targetLvmVolumePath := getLvmDevPath(project, vgName, volumeType, lvName)
-	if makeThinLv {
-		// Snapshots of thin logical volumes can be activated directly.
-		// Activating a regular snapshot would complain about changing
-		// the origin (which we never do), so skip the explicit activation
-		// there; such volumes get activated automatically anyway.
-		err := storageLVActivate(targetLvmVolumePath)
-		if err != nil {
-			return "", errors.Wrap(err, "Activate LVM volume")
-		}
-	}
-
-	return targetLvmVolumePath, nil
-}
-
-func (s *storageLvm) createSnapshotContainer(snapshotContainer instance.Instance, sourceContainer instance.Instance, readonly bool) error {
-	tryUndo := true
-
-	sourceContainerName := sourceContainer.Name()
-	targetContainerName := snapshotContainer.Name()
-	sourceContainerLvmName := containerNameToLVName(sourceContainerName)
-	targetContainerLvmName := containerNameToLVName(targetContainerName)
-	logger.Debugf("Creating snapshot: %s to %s", sourceContainerName, targetContainerName)
-
-	poolName := s.getOnDiskPoolName()
-	_, err := s.createSnapshotLV(sourceContainer.Project(), poolName, sourceContainerLvmName, storagePoolVolumeAPIEndpointContainers, targetContainerLvmName, storagePoolVolumeAPIEndpointContainers, readonly, s.useThinpool)
-	if err != nil {
-		return fmt.Errorf("Error creating snapshot LV: %s", err)
-	}
-	defer func() {
-		if tryUndo {
-			s.ContainerDelete(snapshotContainer)
-		}
-	}()
-
-	targetContainerMntPoint := ""
-	targetContainerPath := snapshotContainer.Path()
-	targetIsSnapshot := snapshotContainer.IsSnapshot()
-	targetPool, err := snapshotContainer.StoragePool()
-	if err != nil {
-		return errors.Wrap(err, "Get snapshot storage pool")
-	}
-	if targetIsSnapshot {
-		targetContainerMntPoint = driver.GetSnapshotMountPoint(sourceContainer.Project(), s.pool.Name, targetContainerName)
-		sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(sourceContainerName)
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(sourceContainer.Project(), sourceName))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(sourceContainer.Project(), sourceName))
-		err = driver.CreateSnapshotMountpoint(targetContainerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-	} else {
-		targetContainerMntPoint = driver.GetContainerMountPoint(sourceContainer.Project(), targetPool, targetContainerName)
-		err = driver.CreateContainerMountpoint(targetContainerMntPoint, targetContainerPath, snapshotContainer.IsPrivileged())
-	}
-	if err != nil {
-		return errors.Wrap(err, "Create mount point")
-	}
-
-	tryUndo = false
-
-	return nil
-}
-
-// Copy a container on a storage pool that does use a thinpool.
-func (s *storageLvm) copyContainerThinpool(target instance.Instance, source instance.Instance, readonly bool) error {
-	err := s.createSnapshotContainer(target, source, readonly)
-	if err != nil {
-		logger.Errorf("Error creating snapshot LV for copy: %s", err)
-		return err
-	}
-
-	// Generate a new filesystem UUID (needed for xfs and btrfs).
-	LVFilesystem := s.getLvmFilesystem()
-	poolName := s.getOnDiskPoolName()
-	containerName := target.Name()
-	containerLvmName := containerNameToLVName(containerName)
-	containerLvDevPath := getLvmDevPath(target.Project(), poolName,
-		storagePoolVolumeAPIEndpointContainers, containerLvmName)
-
-	// If btrfstune sees two btrfs filesystems with the same UUID it
-	// gets confused and wants both of them unmounted. So unmount
-	// the source as well.
-	if LVFilesystem == "btrfs" {
-		ourUmount, err := s.ContainerUmount(source, source.Path())
-		if err != nil {
-			return err
-		}
-
-		if ourUmount {
-			defer s.ContainerMount(source)
-		}
-	}
-
-	msg, err := driver.FSGenerateNewUUID(LVFilesystem, containerLvDevPath)
-	if err != nil {
-		logger.Errorf("Failed to create new \"%s\" UUID for container \"%s\" on storage pool \"%s\": %s", LVFilesystem, containerName, s.pool.Name, msg)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageLvm) copySnapshot(target instance.Instance, source instance.Instance, refresh bool) error {
-	sourcePool, err := source.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	targetParentName, _, _ := shared.InstanceGetParentAndSnapshotName(target.Name())
-	containersPath := driver.GetSnapshotMountPoint(target.Project(), s.pool.Name, targetParentName)
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(target.Project(), targetParentName))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(target.Project(), targetParentName))
-	err = driver.CreateSnapshotMountpoint(containersPath, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-	if err != nil {
-		return err
-	}
-
-	if s.useThinpool && sourcePool == s.pool.Name && !refresh {
-		err = s.copyContainerThinpool(target, source, true)
-	} else {
-		err = s.copyContainerLv(target, source, true, refresh)
-	}
-	if err != nil {
-		logger.Errorf("Error creating snapshot LV for copy: %s", err)
-		return err
-	}
-
-	return nil
-}
-
-// Copy a container on a storage pool that does not use a thinpool.
-func (s *storageLvm) copyContainerLv(target instance.Instance, source instance.Instance, readonly bool, refresh bool) error {
-	exists, err := storageLVExists(getLvmDevPath(target.Project(), s.getOnDiskPoolName(),
-		storagePoolVolumeAPIEndpointContainers, containerNameToLVName(target.Name())))
-	if err != nil {
-		return err
-	}
-
-	// Only create container/snapshot if it doesn't already exist
-	if !exists {
-		err := s.ContainerCreate(target)
-		if err != nil {
-			return err
-		}
-	}
-
-	targetName := target.Name()
-	targetStart, err := target.StorageStart()
-	if err != nil {
-		return err
-	}
-	if targetStart {
-		defer target.StorageStop()
-	}
-
-	sourceName := source.Name()
-	sourceStart, err := source.StorageStart()
-	if err != nil {
-		return err
-	}
-	if sourceStart {
-		defer source.StorageStop()
-	}
-
-	sourcePool, err := source.StoragePool()
-	if err != nil {
-		return err
-	}
-	sourceContainerMntPoint := driver.GetContainerMountPoint(source.Project(), sourcePool, sourceName)
-	if source.IsSnapshot() {
-		sourceContainerMntPoint = driver.GetSnapshotMountPoint(source.Project(), sourcePool, sourceName)
-	}
-
-	targetContainerMntPoint := driver.GetContainerMountPoint(target.Project(), s.pool.Name, targetName)
-	if target.IsSnapshot() {
-		targetContainerMntPoint = driver.GetSnapshotMountPoint(source.Project(), s.pool.Name, targetName)
-	}
-
-	if source.IsRunning() {
-		err = source.Freeze()
-		if err != nil {
-			return err
-		}
-		defer source.Unfreeze()
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	output, err := rsync.LocalCopy(sourceContainerMntPoint, targetContainerMntPoint, bwlimit, true)
-	if err != nil {
-		return fmt.Errorf("Failed to rsync container: %s: %s", string(output), err)
-	}
-
-	if readonly {
-		targetLvmName := containerNameToLVName(targetName)
-		poolName := s.getOnDiskPoolName()
-		_, err := shared.TryRunCommand("lvchange", "-pr", fmt.Sprintf("%s/%s_%s", poolName, storagePoolVolumeAPIEndpointContainers, targetLvmName))
-		if err != nil {
-			logger.Errorf("Failed to make LVM snapshot \"%s\" read-write: %v", targetName, err)
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Copy an lvm container.
-func (s *storageLvm) copyContainer(target instance.Instance, source instance.Instance, refresh bool) error {
-	targetPool, err := target.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	targetContainerMntPoint := driver.GetContainerMountPoint(target.Project(), targetPool, target.Name())
-	err = driver.CreateContainerMountpoint(targetContainerMntPoint, target.Path(), target.IsPrivileged())
-	if err != nil {
-		return err
-	}
-
-	sourcePool, err := source.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	if s.useThinpool && targetPool == sourcePool && !refresh {
-		// If the storage pool uses a thinpool we can have snapshots of
-		// snapshots.
-		err = s.copyContainerThinpool(target, source, false)
-	} else {
-		// If the storage pools does not use a thinpool we need to
-		// perform full copies.
-		err = s.copyContainerLv(target, source, false, refresh)
-	}
-	if err != nil {
-		return err
-	}
-
-	err = target.DeferTemplateApply("copy")
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageLvm) containerCreateFromImageLv(c instance.Instance, fp string) error {
-	containerName := c.Name()
-
-	err := s.ContainerCreate(c)
-	if err != nil {
-		logger.Errorf(`Failed to create non-thinpool LVM storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Created non-thinpool LVM storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-
-	containerPath := c.Path()
-	_, err = s.ContainerMount(c)
-	if err != nil {
-		logger.Errorf(`Failed to mount non-thinpool LVM storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Mounted non-thinpool LVM storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-
-	imagePath := shared.VarPath("images", fp)
-	containerMntPoint := driver.GetContainerMountPoint(c.Project(), s.pool.Name, containerName)
-	err = driver.ImageUnpack(imagePath, containerMntPoint, "", true, s.s.OS.RunningInUserNS, nil)
-	if err != nil {
-		logger.Errorf(`Failed to unpack image "%s" into non-thinpool LVM storage volume "%s" for container "%s" on storage pool "%s": %s`, imagePath, containerMntPoint, containerName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Unpacked image "%s" into non-thinpool LVM storage volume "%s" for container "%s" on storage pool "%s"`, imagePath, containerMntPoint, containerName, s.pool.Name)
-
-	s.ContainerUmount(c, containerPath)
-
-	return nil
-}
-
-func (s *storageLvm) containerCreateFromImageThinLv(c instance.Instance, fp string) error {
-	poolName := s.getOnDiskPoolName()
-	// Check if the image already exists.
-	imageLvmDevPath := getLvmDevPath("default", poolName, storagePoolVolumeAPIEndpointImages, fp)
-
-	imageStoragePoolLockID := getImageCreateLockID(poolName, fp)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[imageStoragePoolLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-	} else {
-		lxdStorageOngoingOperationMap[imageStoragePoolLockID] = make(chan bool)
-		lxdStorageMapLock.Unlock()
-
-		var imgerr error
-		ok, _ := storageLVExists(imageLvmDevPath)
-		if ok {
-			_, volume, err := s.s.Cluster.StoragePoolNodeVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
-			if err != nil {
-				return errors.Wrapf(err, "Fetch image volume %s", fp)
-			}
-			if volume.Config["block.filesystem"] != s.getLvmFilesystem() {
-				// The storage pool volume.block.filesystem property has changed, re-import the image.
-				err := s.ImageDelete(fp)
-				if err != nil {
-					return errors.Wrap(err, "Image delete")
-				}
-				ok = false
-			}
-		}
-
-		if !ok {
-			imgerr = s.ImageCreate(fp, nil)
-		}
-
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[imageStoragePoolLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, imageStoragePoolLockID)
-		}
-		lxdStorageMapLock.Unlock()
-
-		if imgerr != nil {
-			return errors.Wrap(imgerr, "Image create")
-		}
-	}
-
-	containerName := c.Name()
-	containerLvmName := containerNameToLVName(containerName)
-	_, err := s.createSnapshotLV(c.Project(), poolName, fp, storagePoolVolumeAPIEndpointImages, containerLvmName, storagePoolVolumeAPIEndpointContainers, false, s.useThinpool)
-	if err != nil {
-		return errors.Wrap(err, "Create snapshot")
-	}
-
-	return nil
-}
-
-func lvmGetLVCount(vgName string) (int, error) {
-	output, err := shared.TryRunCommand("vgs", "--noheadings", "-o", "lv_count", vgName)
-	if err != nil {
-		return -1, err
-	}
-
-	output = strings.TrimSpace(output)
-	return strconv.Atoi(output)
-}
-
-func lvmLvIsWritable(lvName string) (bool, error) {
-	output, err := shared.TryRunCommand("lvs", "--noheadings", "-o", "lv_attr", lvName)
-	if err != nil {
-		return false, errors.Wrapf(err, "Error retrieving attributes for logical volume %q", lvName)
-	}
-
-	output = strings.TrimSpace(output)
-	return rune(output[1]) == 'w', nil
-}
-
-func storageVGActivate(lvmVolumePath string) error {
-	_, err := shared.TryRunCommand("vgchange", "-ay", lvmVolumePath)
-	if err != nil {
-		return fmt.Errorf("could not activate volume group \"%s\": %v", lvmVolumePath, err)
-	}
-
-	return nil
-}
-
-func storageLVActivate(lvmVolumePath string) error {
-	_, err := shared.TryRunCommand("lvchange", "-ay", lvmVolumePath)
-	if err != nil {
-		return fmt.Errorf("could not activate logival volume \"%s\": %v", lvmVolumePath, err)
-	}
-
-	return nil
-}
-
-func storagePVExists(pvName string) (bool, error) {
-	_, err := shared.RunCommand("pvs", "--noheadings", "-o", "lv_attr", pvName)
-	if err != nil {
-		runErr, ok := err.(shared.RunError)
-		if ok {
-			exitError, ok := runErr.Err.(*exec.ExitError)
-			if ok {
-				waitStatus := exitError.Sys().(syscall.WaitStatus)
-				if waitStatus.ExitStatus() == 5 {
-					// physical volume not found
-					return false, nil
-				}
-			}
-		}
-		return false, fmt.Errorf("error checking for physical volume \"%s\"", pvName)
-	}
-
-	return true, nil
-}
-
-func storageVGExists(vgName string) (bool, error) {
-	_, err := shared.RunCommand("vgs", "--noheadings", "-o", "lv_attr", vgName)
-	if err != nil {
-		runErr, ok := err.(shared.RunError)
-		if ok {
-			exitError, ok := runErr.Err.(*exec.ExitError)
-			if ok {
-				waitStatus := exitError.Sys().(syscall.WaitStatus)
-				if waitStatus.ExitStatus() == 5 {
-					// volume group not found
-					return false, nil
-				}
-			}
-		}
-
-		return false, fmt.Errorf("error checking for volume group \"%s\"", vgName)
-	}
-
-	return true, nil
-}
-
-func storageLVExists(lvName string) (bool, error) {
-	_, err := shared.RunCommand("lvs", "--noheadings", "-o", "lv_attr", lvName)
-	if err != nil {
-		runErr, ok := err.(shared.RunError)
-		if ok {
-			exitError, ok := runErr.Err.(*exec.ExitError)
-			if ok {
-				waitStatus := exitError.Sys().(syscall.WaitStatus)
-				if waitStatus.ExitStatus() == 5 {
-					// logical volume not found
-					return false, nil
-				}
-			}
-		}
-
-		return false, fmt.Errorf("error checking for logical volume \"%s\"", lvName)
-	}
-
-	return true, nil
-}
-
-func lvmGetLVSize(lvPath string) (string, error) {
-	msg, err := shared.TryRunCommand("lvs", "--noheadings", "-o", "size", "--nosuffix", "--units", "b", lvPath)
-	if err != nil {
-		return "", fmt.Errorf("failed to retrieve size of logical volume: %s: %s", string(msg), err)
-	}
-
-	sizeString := string(msg)
-	sizeString = strings.TrimSpace(sizeString)
-	size, err := strconv.ParseInt(sizeString, 10, 64)
-	if err != nil {
-		return "", err
-	}
-
-	detectedSize := units.GetByteSizeString(size, 0)
-
-	return detectedSize, nil
-}
-
-func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) {
-	output, err := shared.RunCommand("vgs", "--noheadings", "-o", "lv_attr", fmt.Sprintf("%s/%s", vgName, poolName))
-	if err != nil {
-		runErr, ok := err.(shared.RunError)
-		if ok {
-			exitError, ok := runErr.Err.(*exec.ExitError)
-			if ok {
-				waitStatus := exitError.Sys().(syscall.WaitStatus)
-				if waitStatus.ExitStatus() == 5 {
-					// pool LV was not found
-					return false, nil
-				}
-			}
-		}
-
-		return false, fmt.Errorf("error checking for pool \"%s\"", poolName)
-	}
-	// Found an LV named poolName; check that it is actually a thin pool.
-	attrs := strings.TrimSpace(string(output[:]))
-	if strings.HasPrefix(attrs, "t") {
-		return true, nil
-	}
-
-	return false, fmt.Errorf("pool named \"%s\" exists but is not a thin pool", poolName)
-}
-
-func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
-	results := []string{}
-
-	cNames, err := s.Cluster.ContainersNodeList(instancetype.Container)
-	if err != nil {
-		return results, err
-	}
-
-	for _, cName := range cNames {
-		var lvLinkPath string
-		if strings.Contains(cName, shared.SnapshotDelimiter) {
-			lvLinkPath = shared.VarPath("snapshots", fmt.Sprintf("%s.lv", cName))
-		} else {
-			lvLinkPath = shared.VarPath("containers", fmt.Sprintf("%s.lv", cName))
-		}
-
-		if shared.PathExists(lvLinkPath) {
-			results = append(results, cName)
-		}
-	}
-
-	imageNames, err := s.Cluster.ImagesGet("default", false)
-	if err != nil {
-		return results, err
-	}
-
-	for _, imageName := range imageNames {
-		imageLinkPath := shared.VarPath("images", fmt.Sprintf("%s.lv", imageName))
-		if shared.PathExists(imageLinkPath) {
-			results = append(results, imageName)
-		}
-	}
-
-	return results, nil
-}
-
-func storageLVMValidateThinPoolName(s *state.State, vgName string, value string) error {
-	users, err := storageLVMGetThinPoolUsers(s)
-	if err != nil {
-		return fmt.Errorf("error checking if a pool is already in use: %v", err)
-	}
-
-	if len(users) > 0 {
-		return fmt.Errorf("can not change LVM config. Images or containers are still using LVs: %v", users)
-	}
-
-	if value != "" {
-		if vgName == "" {
-			return fmt.Errorf("can not set lvm.thinpool_name without lvm.vg_name set")
-		}
-
-		poolExists, err := storageLVMThinpoolExists(vgName, value)
-		if err != nil {
-			return fmt.Errorf("error checking for thin pool \"%s\" in \"%s\": %v", value, vgName, err)
-		}
-
-		if !poolExists {
-			return fmt.Errorf("pool \"'%s\" does not exist in Volume Group \"%s\"", value, vgName)
-		}
-	}
-
-	return nil
-}
-
-func lvmVGRename(oldName string, newName string) error {
-	_, err := shared.TryRunCommand("vgrename", oldName, newName)
-	if err != nil {
-		return fmt.Errorf("could not rename volume group from \"%s\" to \"%s\": %v", oldName, newName, err)
-	}
-
-	return nil
-}
-
-func lvmLVRename(vgName string, oldName string, newName string) error {
-	_, err := shared.TryRunCommand("lvrename", vgName, oldName, newName)
-	if err != nil {
-		return fmt.Errorf("could not rename volume group from \"%s\" to \"%s\": %v", oldName, newName, err)
-	}
-
-	return nil
-}
-
-func containerNameToLVName(containerName string) string {
-	lvName := strings.Replace(containerName, "-", "--", -1)
-	return strings.Replace(lvName, shared.SnapshotDelimiter, "-", -1)
-}
-
-func getLvmDevPath(projectName, lvmPool string, volumeType string, lvmVolume string) string {
-	lvmVolume = project.Prefix(projectName, lvmVolume)
-	if volumeType == "" {
-		return fmt.Sprintf("/dev/%s/%s", lvmPool, lvmVolume)
-	}
-
-	return fmt.Sprintf("/dev/%s/%s_%s", lvmPool, volumeType, lvmVolume)
-}
-
-func getLVName(lvmPool string, volumeType string, lvmVolume string) string {
-	if volumeType == "" {
-		return fmt.Sprintf("%s/%s", lvmPool, lvmVolume)
-	}
-
-	return fmt.Sprintf("%s/%s_%s", lvmPool, volumeType, lvmVolume)
-}
-
-func getPrefixedLvName(projectName, volumeType string, lvmVolume string) string {
-	lvmVolume = project.Prefix(projectName, lvmVolume)
-	return fmt.Sprintf("%s_%s", volumeType, lvmVolume)
-}
-
-func lvmCreateLv(projectName, vgName string, thinPoolName string, lvName string, lvFsType string, lvSize string, volumeType string, makeThinLv bool) error {
-	var output string
-	var err error
-
-	// Round the size to closest 512 bytes
-	lvSizeInt, err := units.ParseByteSizeString(lvSize)
-	if err != nil {
-		return err
-	}
-
-	lvSizeInt = int64(lvSizeInt/512) * 512
-	lvSizeString := units.GetByteSizeString(lvSizeInt, 0)
-
-	lvmPoolVolumeName := getPrefixedLvName(projectName, volumeType, lvName)
-	if makeThinLv {
-		targetVg := fmt.Sprintf("%s/%s", vgName, thinPoolName)
-		_, err = shared.TryRunCommand("lvcreate", "-Wy", "--yes", "--thin", "-n", lvmPoolVolumeName, "--virtualsize", lvSizeString, targetVg)
-	} else {
-		_, err = shared.TryRunCommand("lvcreate", "-Wy", "--yes", "-n", lvmPoolVolumeName, "--size", lvSizeString, vgName)
-	}
-	if err != nil {
-		logger.Errorf("Could not create LV \"%s\": %v", lvmPoolVolumeName, err)
-		return fmt.Errorf("Could not create thin LV named %s: %v", lvmPoolVolumeName, err)
-	}
-
-	fsPath := getLvmDevPath(projectName, vgName, volumeType, lvName)
-
-	output, err = makeFSType(fsPath, lvFsType, nil)
-	if err != nil {
-		logger.Errorf("Filesystem creation failed: %v (%s)", err, output)
-		return fmt.Errorf("Error making filesystem on image LV: %v (%s)", err, output)
-	}
-
-	return nil
-}
-
-func lvmCreateThinpool(s *state.State, sTypeVersion string, vgName string, thinPoolName string, lvFsType string) error {
-	exists, err := storageLVMThinpoolExists(vgName, thinPoolName)
-	if err != nil {
-		return err
-	}
-
-	if exists {
-		return nil
-	}
-
-	err = createDefaultThinPool(sTypeVersion, vgName, thinPoolName, lvFsType)
-	if err != nil {
-		return err
-	}
-
-	err = storageLVMValidateThinPoolName(s, vgName, thinPoolName)
-	if err != nil {
-		logger.Errorf("Setting thin pool name: %s", err)
-		return fmt.Errorf("Error setting LVM thin pool config: %v", err)
-	}
-
-	return nil
-}
-
-func createDefaultThinPool(sTypeVersion string, vgName string, thinPoolName string, lvFsType string) error {
-	isRecent, err := lvmVersionIsAtLeast(sTypeVersion, "2.02.99")
-	if err != nil {
-		return fmt.Errorf("Error checking LVM version: %s", err)
-	}
-
-	// Create the thin pool
-	lvmThinPool := fmt.Sprintf("%s/%s", vgName, thinPoolName)
-	if isRecent {
-		_, err = shared.TryRunCommand(
-			"lvcreate",
-			"-Wy", "--yes",
-			"--poolmetadatasize", "1G",
-			"-l", "100%FREE",
-			"--thinpool", lvmThinPool)
-	} else {
-		_, err = shared.TryRunCommand(
-			"lvcreate",
-			"-Wy", "--yes",
-			"--poolmetadatasize", "1G",
-			"-L", "1G",
-			"--thinpool", lvmThinPool)
-	}
-
-	if err != nil {
-		logger.Errorf("Could not create thin pool \"%s\": %v", thinPoolName, err)
-		return fmt.Errorf("Could not create LVM thin pool named %s: %v", thinPoolName, err)
-	}
-
-	if !isRecent {
-		// Grow it to the maximum VG size (two step process required by old LVM)
-		_, err = shared.TryRunCommand("lvextend", "--alloc", "anywhere", "-l", "100%FREE", lvmThinPool)
-
-		if err != nil {
-			logger.Errorf("Could not grow thin pool: \"%s\": %v", thinPoolName, err)
-			return fmt.Errorf("Could not grow LVM thin pool named %s: %v", thinPoolName, err)
-		}
-	}
-
-	return nil
-}
-
-func lvmVersionIsAtLeast(sTypeVersion string, versionString string) (bool, error) {
-	lvmVersionString := strings.Split(sTypeVersion, "/")[0]
-
-	lvmVersion, err := version.Parse(lvmVersionString)
-	if err != nil {
-		return false, err
-	}
-
-	inVersion, err := version.Parse(versionString)
-	if err != nil {
-		return false, err
-	}
-
-	if lvmVersion.Compare(inVersion) < 0 {
-		return false, nil
-	}
-
-	return true, nil
-}
-
-// Copy an LVM custom volume.
-func (s *storageLvm) copyVolume(sourcePool string, source string) error {
-	targetMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-	err := os.MkdirAll(targetMntPoint, 0711)
-	if err != nil {
-		return err
-	}
-
-	if s.useThinpool && sourcePool == s.pool.Name {
-		err = s.copyVolumeThinpool(source, s.volume.Name, false)
-	} else {
-		err = s.copyVolumeLv(sourcePool, source, s.volume.Name, false)
-	}
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageLvm) copyVolumeSnapshot(sourcePool string, source string) error {
-	_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(source)
-	target := fmt.Sprintf("%s/%s", s.volume.Name, snapOnlyName)
-	targetMntPoint := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, target)
-
-	err := os.MkdirAll(targetMntPoint, 0711)
-	if err != nil {
-		return err
-	}
-
-	if s.useThinpool && sourcePool == s.pool.Name {
-		err = s.copyVolumeThinpool(source, target, true)
-	} else {
-		err = s.copyVolumeLv(sourcePool, source, target, true)
-	}
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageLvm) copyVolumeLv(sourcePool string, source string, target string, readOnly bool) error {
-	var srcMountPoint string
-	var dstMountPoint string
-
-	sourceIsSnapshot := shared.IsSnapshot(source)
-
-	if sourceIsSnapshot {
-		srcMountPoint = driver.GetStoragePoolVolumeSnapshotMountPoint(sourcePool, source)
-	} else {
-		srcMountPoint = driver.GetStoragePoolVolumeMountPoint(sourcePool, source)
-	}
-
-	targetIsSnapshot := shared.IsSnapshot(target)
-
-	if targetIsSnapshot {
-		dstMountPoint = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, target)
-	} else {
-		dstMountPoint = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, target)
-	}
-
-	var err error
-
-	if targetIsSnapshot {
-		err = s.StoragePoolVolumeSnapshotCreate(&api.StorageVolumeSnapshotsPost{Name: target})
-	} else {
-		err = s.StoragePoolVolumeCreate()
-	}
-	if err != nil {
-		logger.Errorf("Failed to create LVM storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	ourMount, err := s.StoragePoolVolumeMount()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer s.StoragePoolVolumeUmount()
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	_, err = rsync.LocalCopy(srcMountPoint, dstMountPoint, bwlimit, true)
-	if err != nil {
-		os.RemoveAll(dstMountPoint)
-		logger.Errorf("Failed to rsync into LVM storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	// Snapshots are already read-only, and this will fail if trying to set
-	// them read-only again.
-	if readOnly && !targetIsSnapshot {
-		targetLvmName := containerNameToLVName(target)
-		poolName := s.getOnDiskPoolName()
-
-		_, err := shared.TryRunCommand("lvchange", "-pr", fmt.Sprintf("%s/%s_%s", poolName, storagePoolVolumeAPIEndpointCustom, targetLvmName))
-		if err != nil {
-			logger.Errorf("Failed to make LVM snapshot \"%s\" read-only: %v", s.volume.Name, err)
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (s *storageLvm) copyVolumeThinpool(source string, target string, readOnly bool) error {
-	sourceLvmName := containerNameToLVName(source)
-	targetLvmName := containerNameToLVName(target)
-
-	poolName := s.getOnDiskPoolName()
-	lvFsType := s.getLvmFilesystem()
-
-	lvSize, err := s.getLvmVolumeSize()
-	if lvSize == "" {
-		logger.Errorf("Failed to get size for LVM storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	_, err = s.createSnapshotLV("default", poolName, sourceLvmName, storagePoolVolumeAPIEndpointCustom, targetLvmName, storagePoolVolumeAPIEndpointCustom, readOnly, s.useThinpool)
-	if err != nil {
-		logger.Errorf("Failed to create snapshot for LVM storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	lvDevPath := getLvmDevPath("default", poolName, storagePoolVolumeAPIEndpointCustom, targetLvmName)
-
-	msg, err := driver.FSGenerateNewUUID(lvFsType, lvDevPath)
-	if err != nil {
-		logger.Errorf("Failed to create new UUID for filesystem \"%s\" for RBD storage volume \"%s\" on storage pool \"%s\": %s: %s", lvFsType, s.volume.Name, s.pool.Name, msg, err)
-		return err
-	}
-
-	return nil
-}

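For readers following the removal above: the deleted helpers containerNameToLVName, getPrefixedLvName and getLvmDevPath encoded the project, the volume type and a dash-escaped instance name into a single LV name. A minimal, self-contained sketch of that naming convention (the "lxdvg" volume group name is made up for illustration):

package main

import (
	"fmt"
	"strings"
)

// snapshotDelimiter mirrors shared.SnapshotDelimiter ("/") in the LXD tree.
const snapshotDelimiter = "/"

// containerNameToLVName escapes user dashes as "--" so that the single "-"
// used for the snapshot separator cannot collide with them.
func containerNameToLVName(name string) string {
	lvName := strings.Replace(name, "-", "--", -1)
	return strings.Replace(lvName, snapshotDelimiter, "-", -1)
}

func main() {
	lv := containerNameToLVName("c1-dev/snap0")
	fmt.Println(lv) // c1--dev-snap0

	// Full device path as built by getLvmDevPath for a container volume
	// in the default project: /dev/<vg>/<type>_<lv>.
	fmt.Printf("/dev/%s/%s_%s\n", "lxdvg", "containers", lv)
}
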
From 6c60fcfe49b790a5836efceea23246cd7fd4381f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Tue, 18 Feb 2020 14:43:15 +0000
Subject: [PATCH 05/36] lxd/storage: Removes unused getPoolMountLockID

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage.go | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 8a87be9f59..89f02e758b 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -52,12 +52,6 @@ var lxdStorageOngoingOperationMap = map[string]chan bool{}
 // lxdStorageMapLock is used to access lxdStorageOngoingOperationMap.
 var lxdStorageMapLock sync.Mutex
 
-// The following functions are used to construct simple operation codes that are
-// unique.
-func getPoolMountLockID(poolName string) string {
-	return fmt.Sprintf("mount/pool/%s", poolName)
-}
-
 func getImageCreateLockID(poolName string, fingerprint string) string {
 	return fmt.Sprintf("create/image/%s/%s", poolName, fingerprint)
 }

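The getImageCreateLockID helper kept above feeds the lxdStorageOngoingOperationMap pattern used in containerCreateFromImageThinLv earlier in this series: the first goroutine to claim a lock ID publishes a channel, later callers with the same ID block until it is closed. A simplified, self-contained sketch of that pattern (withImageCreateLock is a hypothetical wrapper, not a function in the tree):

package main

import (
	"fmt"
	"sync"
)

var (
	ongoing = map[string]chan bool{}
	mu      sync.Mutex
)

func getImageCreateLockID(poolName string, fingerprint string) string {
	return fmt.Sprintf("create/image/%s/%s", poolName, fingerprint)
}

// withImageCreateLock runs fn exactly once per in-flight lock ID; concurrent
// callers with the same ID wait for the first one to finish instead.
func withImageCreateLock(poolName string, fingerprint string, fn func()) {
	id := getImageCreateLockID(poolName, fingerprint)

	mu.Lock()
	if wait, ok := ongoing[id]; ok {
		mu.Unlock()
		<-wait // Blocks until the first caller closes the channel.
		return
	}
	ongoing[id] = make(chan bool)
	mu.Unlock()

	fn()

	mu.Lock()
	close(ongoing[id])
	delete(ongoing, id)
	mu.Unlock()
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			withImageCreateLock("default", "abc123", func() {
				fmt.Println("creating image volume once per in-flight ID")
			})
		}()
	}
	wg.Wait()
}
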
From 60edaa281fe353358d1d9702463801074320b59a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 19 Feb 2020 14:02:57 +0000
Subject: [PATCH 06/36] lxd/storage/pools/utils: Comment on storagePoolDBCreate

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_pools_utils.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 92fa2fbb09..500ac48b20 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -179,6 +179,7 @@ func profilesUsingPoolGetNames(db *db.Cluster, project string, poolName string)
 	return usedBy, nil
 }
 
+// storagePoolDBCreate creates a storage pool DB entry and returns the created Pool ID.
 func storagePoolDBCreate(s *state.State, poolName, poolDescription string, driver string, config map[string]string) (int64, error) {
 	// Check that the storage pool does not already exist.
 	_, err := s.Cluster.StoragePoolGetID(poolName)

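Looking back at the StoragePoolResources implementation removed in the first patch above: a thinpool volume group always reports zero free space, so used space is approximated from the thinpool LV's data and metadata allocation percentages. A small sketch of that arithmetic against a made-up lvs output line (the sample values are illustrative only):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Sample output of:
	//   lvs <vg>/<thinpool> --noheadings --units b --nosuffix \
	//       --separator , -o lv_size,data_percent,metadata_percent
	out := "  10737418240,42.50,1.20\n"

	parts := strings.Split(strings.TrimSpace(out), ",")
	total, _ := strconv.ParseUint(parts[0], 10, 64)
	dataPerc, _ := strconv.ParseFloat(parts[1], 64)
	metaPerc, _ := strconv.ParseFloat(parts[2], 64)

	// Used = Total * (data% + meta%) / 100, as in the removed code.
	used := uint64(float64(total) * ((dataPerc + metaPerc) / 100))
	fmt.Printf("total=%d used=%d\n", total, used)
}
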
From 19dd698b6f14a6e35fea8dff9f42d380417e1a62 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 19 Feb 2020 15:13:30 +0000
Subject: [PATCH 07/36] lxd/api/internal: Removes legacy storage pool loading

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/api_internal.go | 104 ++++++++++++--------------------------------
 1 file changed, 29 insertions(+), 75 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 5073c5ec8e..63ed0eff58 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -26,7 +26,6 @@ import (
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/lxd/response"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	log "github.com/lxc/lxd/shared/log15"
@@ -462,78 +461,41 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		return response.SmartError(err)
 	}
 
-	// Update snapshot names to include container name (if needed)
+	// Update snapshot names to include container name (if needed).
 	for i, snap := range backup.Snapshots {
 		if !strings.Contains(snap.Name, "/") {
 			backup.Snapshots[i].Name = fmt.Sprintf("%s/%s", backup.Container.Name, snap.Name)
 		}
 	}
 
-	// Try to retrieve the storage pool the container supposedly lives on.
-	var poolErr error
-	poolID, pool, poolErr := d.cluster.StoragePoolGet(containerPoolName)
-	if poolErr != nil {
-		if poolErr != db.ErrNoSuchObject {
-			return response.SmartError(poolErr)
-		}
-	}
-
 	if backup.Pool == nil {
 		// We don't know what kind of storage type the pool is.
 		return response.BadRequest(fmt.Errorf(`No storage pool struct in the backup file found. The storage pool needs to be recovered manually`))
 	}
 
-	if poolErr == db.ErrNoSuchObject {
+	// Try to retrieve the storage pool the container supposedly lives on.
+	pool, err := storagePools.GetPoolByName(d.State(), containerPoolName)
+	if err == db.ErrNoSuchObject {
 		// Create the storage pool db entry if it doesn't exist.
-		_, err := storagePoolDBCreate(d.State(), containerPoolName, "",
-			backup.Pool.Driver, backup.Pool.Config)
+		_, err = storagePoolDBCreate(d.State(), containerPoolName, "", backup.Pool.Driver, backup.Pool.Config)
 		if err != nil {
-			err = errors.Wrap(err, "Create storage pool database entry")
-			return response.SmartError(err)
+			return response.SmartError(errors.Wrap(err, "Create storage pool database entry"))
 		}
 
-		poolID, err = d.cluster.StoragePoolGetID(containerPoolName)
+		pool, err = storagePools.GetPoolByName(d.State(), containerPoolName)
 		if err != nil {
-			return response.SmartError(err)
-		}
-	} else {
-		if backup.Pool.Name != containerPoolName {
-			return response.BadRequest(fmt.Errorf(`The storage pool %q the instance was detected on does not match the storage pool %q specified in the backup file`, containerPoolName, backup.Pool.Name))
-		}
-
-		if backup.Pool.Driver != pool.Driver {
-			return response.BadRequest(fmt.Errorf(`The storage pool's %q driver %q conflicts with the driver %q recorded in the instance's backup file`, containerPoolName, pool.Driver, backup.Pool.Driver))
+			return response.SmartError(errors.Wrap(err, "Load storage pool database entry"))
 		}
+	} else if err != nil {
+		return response.SmartError(errors.Wrap(err, "Find storage pool database entry"))
 	}
 
-	var poolName string
-	_, err = storagePools.GetPoolByName(d.State(), backup.Pool.Name)
-	if err != storageDrivers.ErrUnknownDriver && err != db.ErrNoSuchObject {
-		if err != nil {
-			return response.InternalError(err)
-		}
-
-		// FIXME: In the new world, we don't expose the on-disk pool
-		// name, instead we need to change the per-driver logic below to using
-		// clean storage functions.
-		poolName = backup.Pool.Name
-	} else {
-		initPool, err := storagePoolInit(d.State(), backup.Pool.Name)
-		if err != nil {
-			err = errors.Wrap(err, "Initialize storage")
-			return response.InternalError(err)
-		}
-
-		ourMount, err := initPool.StoragePoolMount()
-		if err != nil {
-			return response.InternalError(err)
-		}
-		if ourMount {
-			defer initPool.StoragePoolUmount()
-		}
+	if backup.Pool.Name != containerPoolName {
+		return response.BadRequest(fmt.Errorf(`The storage pool %q the instance was detected on does not match the storage pool %q specified in the backup file`, containerPoolName, backup.Pool.Name))
+	}
 
-		// retrieve on-disk pool name
-		_, _, poolName = initPool.GetContainerPoolInfo()
+	if backup.Pool.Driver != pool.Driver().Info().Name {
+		return response.BadRequest(fmt.Errorf(`The storage pool's %q driver %q conflicts with the driver %q recorded in the instance's backup file`, containerPoolName, pool.Driver().Info().Name, backup.Pool.Driver))
 	}
 
 	existingSnapshots := []*api.InstanceSnapshot{}
@@ -544,7 +506,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 	if len(backup.Snapshots) > 0 {
 		switch backup.Pool.Driver {
 		case "btrfs":
-			snapshotsDirPath := storagePools.GetSnapshotMountPoint(projectName, poolName, req.Name)
+			snapshotsDirPath := storagePools.GetSnapshotMountPoint(projectName, pool.Name(), req.Name)
 			snapshotsDir, err := os.Open(snapshotsDirPath)
 			if err != nil {
 				return response.InternalError(err)
@@ -556,7 +518,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			}
 			snapshotsDir.Close()
 		case "dir":
-			snapshotsDirPath := storagePools.GetSnapshotMountPoint(projectName, poolName, req.Name)
+			snapshotsDirPath := storagePools.GetSnapshotMountPoint(projectName, pool.Name(), req.Name)
 			snapshotsDir, err := os.Open(snapshotsDirPath)
 			if err != nil {
 				return response.InternalError(err)
@@ -664,19 +626,18 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		switch backup.Pool.Driver {
 		case "btrfs":
 			snapName := fmt.Sprintf("%s/%s", req.Name, od)
-			err = btrfsSnapshotDeleteInternal(projectName, poolName, snapName)
+			err = btrfsSnapshotDeleteInternal(projectName, pool.Name(), snapName)
 		case "dir":
 			snapName := fmt.Sprintf("%s/%s", req.Name, od)
-			err = dirSnapshotDeleteInternal(projectName, poolName, snapName)
+			err = dirSnapshotDeleteInternal(projectName, pool.Name(), snapName)
 		case "lvm":
 			onDiskPoolName := backup.Pool.Config["lvm.vg_name"]
 			if onDiskPoolName == "" {
-				onDiskPoolName = poolName
+				onDiskPoolName = pool.Name()
 			}
 			snapName := fmt.Sprintf("%s/%s", req.Name, od)
 			snapPath := storagePools.InstancePath(instancetype.Container, projectName, snapName, true)
-			err = lvmContainerDeleteInternal(projectName, poolName, req.Name,
-				true, onDiskPoolName, snapPath)
+			err = lvmContainerDeleteInternal(projectName, pool.Name(), req.Name, true, onDiskPoolName, snapPath)
 		case "ceph":
 			clusterName := "ceph"
 			if backup.Pool.Config["ceph.cluster_name"] != "" {
@@ -690,17 +651,14 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 
 			onDiskPoolName := backup.Pool.Config["ceph.osd.pool_name"]
 			snapName := fmt.Sprintf("snapshot_%s", od)
-			ret := cephContainerSnapshotDelete(clusterName,
-				onDiskPoolName, project.Prefix(projectName, req.Name),
-				storagePoolVolumeTypeNameContainer, snapName, userName)
+			ret := cephContainerSnapshotDelete(clusterName, onDiskPoolName, project.Prefix(projectName, req.Name), storagePoolVolumeTypeNameContainer, snapName, userName)
 			if ret < 0 {
 				err = fmt.Errorf(`Failed to delete snapshot`)
 			}
 		case "zfs":
 			onDiskPoolName := backup.Pool.Config["zfs.pool_name"]
 			snapName := fmt.Sprintf("%s/%s", req.Name, od)
-			err = zfsSnapshotDeleteInternal(projectName, poolName, snapName,
-				onDiskPoolName)
+			err = zfsSnapshotDeleteInternal(projectName, pool.Name(), snapName, onDiskPoolName)
 		}
 		if err != nil {
 			logger.Warnf(`Failed to delete snapshot`)
@@ -728,9 +686,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		case "lvm":
 			ctName, csName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
 			ctLvmName := lvmNameToLVName(fmt.Sprintf("%s/%s", project.Prefix(projectName, ctName), csName))
-			ctLvName := lvmLVName(poolName,
-				storagePoolVolumeAPIEndpointContainers,
-				ctLvmName)
+			ctLvName := lvmLVName(pool.Name(), storagePoolVolumeAPIEndpointContainers, ctLvmName)
 			exists, err := lvmLVExists(ctLvName)
 			if err != nil {
 				return response.InternalError(err)
@@ -772,9 +728,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			ctName, csName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
 			snapshotName := fmt.Sprintf("snapshot-%s", csName)
 
-			exists := zfsFilesystemEntityExists(poolName,
-				fmt.Sprintf("containers/%s@%s", project.Prefix(projectName, ctName),
-					snapshotName))
+			exists := zfsFilesystemEntityExists(pool.Name(), fmt.Sprintf("containers/%s@%s", project.Prefix(projectName, ctName), snapshotName))
 			if !exists {
 				if req.Force {
 					continue
@@ -787,7 +741,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 	}
 
 	// Check if a storage volume entry for the container already exists.
-	_, volume, ctVolErr := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, req.Name, storagePoolVolumeTypeContainer, poolID)
+	_, volume, ctVolErr := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, req.Name, storagePoolVolumeTypeContainer, pool.ID())
 	if ctVolErr != nil {
 		if ctVolErr != db.ErrNoSuchObject {
 			return response.SmartError(ctVolErr)
@@ -826,7 +780,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 
 		// Remove the storage volume db entry for the container since force was specified.
-		err := d.cluster.StoragePoolVolumeDelete(projectName, req.Name, storagePoolVolumeTypeContainer, poolID)
+		err := d.cluster.StoragePoolVolumeDelete(projectName, req.Name, storagePoolVolumeTypeContainer, pool.ID())
 		if err != nil {
 			return response.SmartError(err)
 		}
@@ -928,7 +882,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 
 		// Check if a storage volume entry for the snapshot already exists.
-		_, _, csVolErr := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, snap.Name, storagePoolVolumeTypeContainer, poolID)
+		_, _, csVolErr := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, snap.Name, storagePoolVolumeTypeContainer, pool.ID())
 		if csVolErr != nil {
 			if csVolErr != db.ErrNoSuchObject {
 				return response.SmartError(csVolErr)
@@ -948,7 +902,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 
 		if csVolErr == nil {
-			err := d.cluster.StoragePoolVolumeDelete(projectName, snap.Name, storagePoolVolumeTypeContainer, poolID)
+			err := d.cluster.StoragePoolVolumeDelete(projectName, snap.Name, storagePoolVolumeTypeContainer, pool.ID())
 			if err != nil {
 				return response.SmartError(err)
 			}

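The internalImport rework above converges on a single load-or-create flow: try the new pool loader, create the DB entry only on db.ErrNoSuchObject, then reload so all later code works with one Pool object. A stripped-down sketch of that control flow (the Pool interface and the two callbacks are stand-ins for storagePools.Pool, storagePools.GetPoolByName and storagePoolDBCreate):

package main

import (
	"errors"
	"fmt"
)

// errNoSuchObject stands in for db.ErrNoSuchObject.
var errNoSuchObject = errors.New("no such object")

type Pool interface {
	Name() string
	ID() int64
}

// loadOrCreatePool mirrors the flow in internalImport: load, create on
// "not found", then load again so callers always get a usable Pool.
func loadOrCreatePool(name string, load func(string) (Pool, error), create func(string) error) (Pool, error) {
	pool, err := load(name)
	if err == errNoSuchObject {
		if err := create(name); err != nil {
			return nil, fmt.Errorf("Create storage pool database entry: %v", err)
		}

		pool, err = load(name)
		if err != nil {
			return nil, fmt.Errorf("Load storage pool database entry: %v", err)
		}

		return pool, nil
	} else if err != nil {
		return nil, fmt.Errorf("Find storage pool database entry: %v", err)
	}

	return pool, nil
}

type fakePool struct{ name string }

func (p fakePool) Name() string { return p.name }
func (p fakePool) ID() int64    { return 1 }

func main() {
	created := false
	load := func(name string) (Pool, error) {
		if !created {
			return nil, errNoSuchObject
		}
		return fakePool{name}, nil
	}
	create := func(name string) error { created = true; return nil }

	pool, err := loadOrCreatePool("default", load, create)
	fmt.Println(pool.Name(), err) // default <nil>
}
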
From acd92d8be54da3af43be86b79b2d373fc1abdb06 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 19 Feb 2020 15:13:55 +0000
Subject: [PATCH 08/36] lxd/api/internal: Consistent comment style

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/api_internal.go | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 63ed0eff58..8ab0e8c4a4 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -442,8 +442,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		return response.BadRequest(fmt.Errorf(`The instance %q does not seem to exist on any storage pool`, req.Name))
 	}
 
-	// User needs to make sure that we can access the directory where
-	// backup.yaml lives.
+	// User needs to make sure that we can access the directory where backup.yaml lives.
 	containerMntPoint := containerMntPoints[0]
 	isEmpty, err := shared.PathIsEmpty(containerMntPoint)
 	if err != nil {
@@ -540,7 +539,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			snaps := strings.Fields(msg)
 			prefix := fmt.Sprintf("containers_%s-", project.Prefix(projectName, req.Name))
 			for _, v := range snaps {
-				// ignore zombies
+				// Ignore zombies.
 				if strings.HasPrefix(v, prefix) {
 					onDiskSnapshots = append(onDiskSnapshots,
 						v[len(prefix):])
@@ -568,7 +567,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			}
 
 			for _, v := range snaps {
-				// ignore zombies
+				// Ignore zombies.
 				if strings.HasPrefix(v, "snapshot_") {
 					onDiskSnapshots = append(onDiskSnapshots,
 						v[len("snapshot_"):])
@@ -583,7 +582,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			}
 
 			for _, v := range snaps {
-				// ignore zombies
+				// Ignore zombies.
 				if strings.HasPrefix(v, "snapshot-") {
 					onDiskSnapshots = append(onDiskSnapshots,
 						v[len("snapshot-"):])
@@ -600,7 +599,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 	}
 
-	// delete snapshots that do not exist in backup.yaml
+	// Delete snapshots that do not exist in backup.yaml.
 	od := ""
 	for _, od = range onDiskSnapshots {
 		inBackupFile := false
@@ -794,13 +793,13 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 	}
 
-	// Prepare root disk entry if needed
+	// Prepare root disk entry if needed.
 	rootDev := map[string]string{}
 	rootDev["type"] = "disk"
 	rootDev["path"] = "/"
 	rootDev["pool"] = containerPoolName
 
-	// Mark the filesystem as going through an import
+	// Mark the filesystem as going through an import.
 	importingFilePath := storagePools.InstanceImportingFilePath(instancetype.Container, containerPoolName, projectName, req.Name)
 	fd, err := os.Create(importingFilePath)
 	if err != nil {
@@ -811,7 +810,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 
 	baseImage := backup.Container.Config["volatile.base_image"]
 
-	// Add root device if missing
+	// Add root device if missing.
 	root, _, _ := shared.GetRootDiskDevice(backup.Container.Devices)
 	if root == "" {
 		if backup.Container.Devices == nil {
@@ -915,7 +914,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			return response.SmartError(err)
 		}
 
-		// Add root device if missing
+		// Add root device if missing.
 		root, _, _ := shared.GetRootDiskDevice(snap.Devices)
 		if root == "" {
 			if snap.Devices == nil {

From 7fa93e99af3e7b273d6b2600984476ab95cd6d84 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 19 Feb 2020 16:54:47 +0000
Subject: [PATCH 09/36] lxd/api/internal: Stops using backup pkg name as
 variable

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/api_internal.go | 102 ++++++++++++++++++++++----------------------
 1 file changed, 51 insertions(+), 51 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 8ab0e8c4a4..d1d9ecc6f4 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -455,19 +455,19 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 
 	// Read in the backup.yaml file.
 	backupYamlPath := filepath.Join(containerMntPoint, "backup.yaml")
-	backup, err := backup.ParseInstanceConfigYamlFile(backupYamlPath)
+	backupConf, err := backup.ParseInstanceConfigYamlFile(backupYamlPath)
 	if err != nil {
 		return response.SmartError(err)
 	}
 
 	// Update snapshot names to include container name (if needed).
-	for i, snap := range backup.Snapshots {
+	for i, snap := range backupConf.Snapshots {
 		if !strings.Contains(snap.Name, "/") {
-			backup.Snapshots[i].Name = fmt.Sprintf("%s/%s", backup.Container.Name, snap.Name)
+			backupConf.Snapshots[i].Name = fmt.Sprintf("%s/%s", backupConf.Container.Name, snap.Name)
 		}
 	}
 
-	if backup.Pool == nil {
+	if backupConf.Pool == nil {
 		// We don't know what kind of storage type the pool is.
 		return response.BadRequest(fmt.Errorf(`No storage pool struct in the backup file found. The storage pool needs to be recovered manually`))
 	}
@@ -476,7 +476,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 	pool, err := storagePools.GetPoolByName(d.State(), containerPoolName)
 	if err == db.ErrNoSuchObject {
 		// Create the storage pool db entry if it doesn't exist.
-		_, err = storagePoolDBCreate(d.State(), containerPoolName, "", backup.Pool.Driver, backup.Pool.Config)
+		_, err = storagePoolDBCreate(d.State(), containerPoolName, "", backupConf.Pool.Driver, backupConf.Pool.Config)
 		if err != nil {
 			return response.SmartError(errors.Wrap(err, "Create storage pool database entry"))
 		}
@@ -489,12 +489,12 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		return response.SmartError(errors.Wrap(err, "Find storage pool database entry"))
 	}
 
-	if backup.Pool.Name != containerPoolName {
-		return response.BadRequest(fmt.Errorf(`The storage pool %q the instance was detected on does not match the storage pool %q specified in the backup file`, containerPoolName, backup.Pool.Name))
+	if backupConf.Pool.Name != containerPoolName {
+		return response.BadRequest(fmt.Errorf(`The storage pool %q the instance was detected on does not match the storage pool %q specified in the backup file`, containerPoolName, backupConf.Pool.Name))
 	}
 
-	if backup.Pool.Driver != pool.Driver().Info().Name {
-		return response.BadRequest(fmt.Errorf(`The storage pool's %q driver %q conflicts with the driver %q recorded in the instance's backup file`, containerPoolName, pool.Driver().Info().Name, backup.Pool.Driver))
+	if backupConf.Pool.Driver != pool.Driver().Info().Name {
+		return response.BadRequest(fmt.Errorf(`The storage pool's %q driver %q conflicts with the driver %q recorded in the instance's backup file`, containerPoolName, pool.Driver().Info().Name, backupConf.Pool.Driver))
 	}
 
 	existingSnapshots := []*api.InstanceSnapshot{}
@@ -591,7 +591,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 	}
 
-	if len(backup.Snapshots) != len(onDiskSnapshots) {
+	if len(backupConf.Snapshots) != len(onDiskSnapshots) {
 		if !req.Force {
 			msg := `There are either snapshots that don't exist on disk anymore or snapshots that are not recorded in the "backup.yaml" file. Pass "force" to remove them`
 			logger.Errorf(msg)
@@ -603,7 +603,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 	od := ""
 	for _, od = range onDiskSnapshots {
 		inBackupFile := false
-		for _, ib := range backup.Snapshots {
+		for _, ib := range backupConf.Snapshots {
 			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(ib.Name)
 			if od == snapOnlyName {
 				inBackupFile = true
@@ -622,7 +622,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 
 		var err error
-		switch backup.Pool.Driver {
+		switch backupConf.Pool.Driver {
 		case "btrfs":
 			snapName := fmt.Sprintf("%s/%s", req.Name, od)
 			err = btrfsSnapshotDeleteInternal(projectName, pool.Name(), snapName)
@@ -630,7 +630,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			snapName := fmt.Sprintf("%s/%s", req.Name, od)
 			err = dirSnapshotDeleteInternal(projectName, pool.Name(), snapName)
 		case "lvm":
-			onDiskPoolName := backup.Pool.Config["lvm.vg_name"]
+			onDiskPoolName := backupConf.Pool.Config["lvm.vg_name"]
 			if onDiskPoolName == "" {
 				onDiskPoolName = pool.Name()
 			}
@@ -639,23 +639,23 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			err = lvmContainerDeleteInternal(projectName, pool.Name(), req.Name, true, onDiskPoolName, snapPath)
 		case "ceph":
 			clusterName := "ceph"
-			if backup.Pool.Config["ceph.cluster_name"] != "" {
-				clusterName = backup.Pool.Config["ceph.cluster_name"]
+			if backupConf.Pool.Config["ceph.cluster_name"] != "" {
+				clusterName = backupConf.Pool.Config["ceph.cluster_name"]
 			}
 
 			userName := "admin"
-			if backup.Pool.Config["ceph.user.name"] != "" {
-				userName = backup.Pool.Config["ceph.user.name"]
+			if backupConf.Pool.Config["ceph.user.name"] != "" {
+				userName = backupConf.Pool.Config["ceph.user.name"]
 			}
 
-			onDiskPoolName := backup.Pool.Config["ceph.osd.pool_name"]
+			onDiskPoolName := backupConf.Pool.Config["ceph.osd.pool_name"]
 			snapName := fmt.Sprintf("snapshot_%s", od)
 			ret := cephContainerSnapshotDelete(clusterName, onDiskPoolName, project.Prefix(projectName, req.Name), storagePoolVolumeTypeNameContainer, snapName, userName)
 			if ret < 0 {
 				err = fmt.Errorf(`Failed to delete snapshot`)
 			}
 		case "zfs":
-			onDiskPoolName := backup.Pool.Config["zfs.pool_name"]
+			onDiskPoolName := backupConf.Pool.Config["zfs.pool_name"]
 			snapName := fmt.Sprintf("%s/%s", req.Name, od)
 			err = zfsSnapshotDeleteInternal(projectName, pool.Name(), snapName, onDiskPoolName)
 		}
@@ -664,10 +664,10 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 	}
 
-	for _, snap := range backup.Snapshots {
-		switch backup.Pool.Driver {
+	for _, snap := range backupConf.Snapshots {
+		switch backupConf.Pool.Driver {
 		case "btrfs":
-			snpMntPt := storagePools.GetSnapshotMountPoint(projectName, backup.Pool.Name, snap.Name)
+			snpMntPt := storagePools.GetSnapshotMountPoint(projectName, backupConf.Pool.Name, snap.Name)
 			if !shared.PathExists(snpMntPt) || !btrfsIsSubVolume(snpMntPt) {
 				if req.Force {
 					continue
@@ -675,7 +675,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 				return response.BadRequest(needForce)
 			}
 		case "dir":
-			snpMntPt := storagePools.GetSnapshotMountPoint(projectName, backup.Pool.Name, snap.Name)
+			snpMntPt := storagePools.GetSnapshotMountPoint(projectName, backupConf.Pool.Name, snap.Name)
 			if !shared.PathExists(snpMntPt) {
 				if req.Force {
 					continue
@@ -699,16 +699,16 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			}
 		case "ceph":
 			clusterName := "ceph"
-			if backup.Pool.Config["ceph.cluster_name"] != "" {
-				clusterName = backup.Pool.Config["ceph.cluster_name"]
+			if backupConf.Pool.Config["ceph.cluster_name"] != "" {
+				clusterName = backupConf.Pool.Config["ceph.cluster_name"]
 			}
 
 			userName := "admin"
-			if backup.Pool.Config["ceph.user.name"] != "" {
-				userName = backup.Pool.Config["ceph.user.name"]
+			if backupConf.Pool.Config["ceph.user.name"] != "" {
+				userName = backupConf.Pool.Config["ceph.user.name"]
 			}
 
-			onDiskPoolName := backup.Pool.Config["ceph.osd.pool_name"]
+			onDiskPoolName := backupConf.Pool.Config["ceph.osd.pool_name"]
 			ctName, csName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
 			ctName = project.Prefix(projectName, ctName)
 			snapshotName := fmt.Sprintf("snapshot_%s", csName)
@@ -765,17 +765,17 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		return response.BadRequest(fmt.Errorf(`Entry for instance %q already exists in the database. Set "force" to overwrite`, req.Name))
 	}
 
-	if backup.Volume == nil {
+	if backupConf.Volume == nil {
 		return response.BadRequest(fmt.Errorf(`No storage volume struct in the backup file found. The storage volume needs to be recovered manually`))
 	}
 
 	if ctVolErr == nil {
-		if volume.Name != backup.Volume.Name {
+		if volume.Name != backupConf.Volume.Name {
 			return response.BadRequest(fmt.Errorf(`The name %q of the storage volume is not identical to the instance's name "%s"`, volume.Name, req.Name))
 		}
 
-		if volume.Type != backup.Volume.Type {
-			return response.BadRequest(fmt.Errorf(`The type %q of the storage volume is not identical to the instance's type %q`, volume.Type, backup.Volume.Type))
+		if volume.Type != backupConf.Volume.Type {
+			return response.BadRequest(fmt.Errorf(`The type %q of the storage volume is not identical to the instance's type %q`, volume.Type, backupConf.Volume.Type))
 		}
 
 		// Remove the storage volume db entry for the container since force was specified.
@@ -808,28 +808,28 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 	fd.Close()
 	defer os.Remove(fd.Name())
 
-	baseImage := backup.Container.Config["volatile.base_image"]
+	baseImage := backupConf.Container.Config["volatile.base_image"]
 
 	// Add root device if missing.
-	root, _, _ := shared.GetRootDiskDevice(backup.Container.Devices)
+	root, _, _ := shared.GetRootDiskDevice(backupConf.Container.Devices)
 	if root == "" {
-		if backup.Container.Devices == nil {
-			backup.Container.Devices = map[string]map[string]string{}
+		if backupConf.Container.Devices == nil {
+			backupConf.Container.Devices = map[string]map[string]string{}
 		}
 
 		rootDevName := "root"
 		for i := 0; i < 100; i++ {
-			if backup.Container.Devices[rootDevName] == nil {
+			if backupConf.Container.Devices[rootDevName] == nil {
 				break
 			}
 			rootDevName = fmt.Sprintf("root%d", i)
 			continue
 		}
 
-		backup.Container.Devices[rootDevName] = rootDev
+		backupConf.Container.Devices[rootDevName] = rootDev
 	}
 
-	arch, err := osarch.ArchitectureId(backup.Container.Architecture)
+	arch, err := osarch.ArchitectureId(backupConf.Container.Architecture)
 	if err != nil {
 		return response.SmartError(err)
 	}
@@ -837,16 +837,16 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		Project:      projectName,
 		Architecture: arch,
 		BaseImage:    baseImage,
-		Config:       backup.Container.Config,
-		CreationDate: backup.Container.CreatedAt,
+		Config:       backupConf.Container.Config,
+		CreationDate: backupConf.Container.CreatedAt,
 		Type:         instancetype.Container,
-		Description:  backup.Container.Description,
-		Devices:      deviceConfig.NewDevices(backup.Container.Devices),
-		Ephemeral:    backup.Container.Ephemeral,
-		LastUsedDate: backup.Container.LastUsedAt,
-		Name:         backup.Container.Name,
-		Profiles:     backup.Container.Profiles,
-		Stateful:     backup.Container.Stateful,
+		Description:  backupConf.Container.Description,
+		Devices:      deviceConfig.NewDevices(backupConf.Container.Devices),
+		Ephemeral:    backupConf.Container.Ephemeral,
+		LastUsedDate: backupConf.Container.LastUsedAt,
+		Name:         backupConf.Container.Name,
+		Profiles:     backupConf.Container.Profiles,
+		Stateful:     backupConf.Container.Stateful,
 	})
 	if err != nil {
 		err = errors.Wrap(err, "Create container")
@@ -855,7 +855,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 
 	containerPath := storagePools.InstancePath(instancetype.Container, projectName, req.Name, false)
 	isPrivileged := false
-	if backup.Container.Config["security.privileged"] == "" {
+	if backupConf.Container.Config["security.privileged"] == "" {
 		isPrivileged = true
 	}
 	err = storagePools.CreateContainerMountpoint(containerMntPoint, containerPath,
@@ -953,11 +953,11 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 
 		// Recreate missing mountpoints and symlinks.
-		snapshotMountPoint := storagePools.GetSnapshotMountPoint(projectName, backup.Pool.Name,
+		snapshotMountPoint := storagePools.GetSnapshotMountPoint(projectName, backupConf.Pool.Name,
 			snap.Name)
 		sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
 		sourceName = project.Prefix(projectName, sourceName)
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", backup.Pool.Name, "containers-snapshots", sourceName)
+		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", backupConf.Pool.Name, "containers-snapshots", sourceName)
 		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
 		err = storagePools.CreateSnapshotMountpoint(snapshotMountPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
 		if err != nil {

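A note on why this rename matters: in Go, a local variable that reuses an
imported package's name shadows the package for the rest of the scope, so any
later call through the package name stops compiling. A minimal sketch of the
pitfall, using the standard library strings package purely for illustration
(the patch above hits the same issue with the lxd backup package):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Here "strings" still refers to the package.
	fmt.Println(strings.ToUpper("ok"))

	// Binding a variable with the same name shadows the package for the
	// rest of this scope.
	strings := []string{"a", "b"}
	fmt.Println(strings)

	// strings.ToUpper("x") // would no longer compile:
	// strings.ToUpper undefined (type []string has no field or method ToUpper)
}

Renaming the variable (backup -> backupConf) keeps the package usable for the
later ParseInstanceConfigYamlFile-style calls in internalImport.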
From 72e062decde6365746ce7cabbe5f69b18f0a32f3 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 19 Feb 2020 16:55:06 +0000
Subject: [PATCH 10/36] lxd/api/internal: Switches internalImport to use
 pool.CheckInstanceBackupFileSnapshots

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/api_internal.go | 251 ++------------------------------------------
 1 file changed, 8 insertions(+), 243 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index d1d9ecc6f4..8eedb78c74 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -27,7 +27,6 @@ import (
 	"github.com/lxc/lxd/lxd/response"
 	storagePools "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
 	log "github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/osarch"
@@ -497,246 +496,14 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		return response.BadRequest(fmt.Errorf(`The storage pool's %q driver %q conflicts with the driver %q recorded in the instance's backup file`, containerPoolName, pool.Driver().Info().Name, backupConf.Pool.Driver))
 	}
 
-	existingSnapshots := []*api.InstanceSnapshot{}
-	needForce := fmt.Errorf(`The snapshot does not exist on disk. Pass "force" to discard non-existing snapshots`)
-
-	// Retrieve all snapshots that exist on disk.
-	onDiskSnapshots := []string{}
-	if len(backup.Snapshots) > 0 {
-		switch backup.Pool.Driver {
-		case "btrfs":
-			snapshotsDirPath := storagePools.GetSnapshotMountPoint(projectName, pool.Name(), req.Name)
-			snapshotsDir, err := os.Open(snapshotsDirPath)
-			if err != nil {
-				return response.InternalError(err)
-			}
-			onDiskSnapshots, err = snapshotsDir.Readdirnames(-1)
-			if err != nil {
-				snapshotsDir.Close()
-				return response.InternalError(err)
-			}
-			snapshotsDir.Close()
-		case "dir":
-			snapshotsDirPath := storagePools.GetSnapshotMountPoint(projectName, pool.Name(), req.Name)
-			snapshotsDir, err := os.Open(snapshotsDirPath)
-			if err != nil {
-				return response.InternalError(err)
-			}
-			onDiskSnapshots, err = snapshotsDir.Readdirnames(-1)
-			if err != nil {
-				snapshotsDir.Close()
-				return response.InternalError(err)
-			}
-			snapshotsDir.Close()
-		case "lvm":
-			onDiskPoolName := backup.Pool.Config["lvm.vg_name"]
-			msg, err := shared.RunCommand("lvs", "-o", "lv_name",
-				onDiskPoolName, "--noheadings")
-			if err != nil {
-				return response.InternalError(err)
-			}
-
-			snaps := strings.Fields(msg)
-			prefix := fmt.Sprintf("containers_%s-", project.Prefix(projectName, req.Name))
-			for _, v := range snaps {
-				// Ignore zombies.
-				if strings.HasPrefix(v, prefix) {
-					onDiskSnapshots = append(onDiskSnapshots,
-						v[len(prefix):])
-				}
-			}
-		case "ceph":
-			clusterName := "ceph"
-			if backup.Pool.Config["ceph.cluster_name"] != "" {
-				clusterName = backup.Pool.Config["ceph.cluster_name"]
-			}
-
-			userName := "admin"
-			if backup.Pool.Config["ceph.user.name"] != "" {
-				userName = backup.Pool.Config["ceph.user.name"]
-			}
-
-			onDiskPoolName := backup.Pool.Config["ceph.osd.pool_name"]
-			snaps, err := cephRBDVolumeListSnapshots(clusterName,
-				onDiskPoolName, project.Prefix(projectName, req.Name),
-				storagePoolVolumeTypeNameContainer, userName)
-			if err != nil {
-				if err != db.ErrNoSuchObject {
-					return response.InternalError(err)
-				}
-			}
-
-			for _, v := range snaps {
-				// Ignore zombies.
-				if strings.HasPrefix(v, "snapshot_") {
-					onDiskSnapshots = append(onDiskSnapshots,
-						v[len("snapshot_"):])
-				}
-			}
-		case "zfs":
-			onDiskPoolName := backup.Pool.Config["zfs.pool_name"]
-			snaps, err := zfsPoolListSnapshots(onDiskPoolName,
-				fmt.Sprintf("containers/%s", project.Prefix(projectName, req.Name)))
-			if err != nil {
-				return response.InternalError(err)
-			}
-
-			for _, v := range snaps {
-				// Ignore zombies.
-				if strings.HasPrefix(v, "snapshot-") {
-					onDiskSnapshots = append(onDiskSnapshots,
-						v[len("snapshot-"):])
-				}
-			}
-		}
-	}
-
-	if len(backupConf.Snapshots) != len(onDiskSnapshots) {
-		if !req.Force {
-			msg := `There are either snapshots that don't exist on disk anymore or snapshots that are not recorded in the "backup.yaml" file. Pass "force" to remove them`
-			logger.Errorf(msg)
-			return response.InternalError(fmt.Errorf(msg))
-		}
-	}
-
-	// Delete snapshots that do not exist in backup.yaml.
-	od := ""
-	for _, od = range onDiskSnapshots {
-		inBackupFile := false
-		for _, ib := range backupConf.Snapshots {
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(ib.Name)
-			if od == snapOnlyName {
-				inBackupFile = true
-				break
-			}
-		}
-
-		if inBackupFile {
-			continue
-		}
-
-		if !req.Force {
-			msg := `There are snapshots that are not recorded in the "backup.yaml" file. Pass "force" to remove them`
-			logger.Errorf(msg)
-			return response.InternalError(fmt.Errorf(msg))
-		}
-
-		var err error
-		switch backupConf.Pool.Driver {
-		case "btrfs":
-			snapName := fmt.Sprintf("%s/%s", req.Name, od)
-			err = btrfsSnapshotDeleteInternal(projectName, pool.Name(), snapName)
-		case "dir":
-			snapName := fmt.Sprintf("%s/%s", req.Name, od)
-			err = dirSnapshotDeleteInternal(projectName, pool.Name(), snapName)
-		case "lvm":
-			onDiskPoolName := backupConf.Pool.Config["lvm.vg_name"]
-			if onDiskPoolName == "" {
-				onDiskPoolName = pool.Name()
-			}
-			snapName := fmt.Sprintf("%s/%s", req.Name, od)
-			snapPath := storagePools.InstancePath(instancetype.Container, projectName, snapName, true)
-			err = lvmContainerDeleteInternal(projectName, pool.Name(), req.Name, true, onDiskPoolName, snapPath)
-		case "ceph":
-			clusterName := "ceph"
-			if backupConf.Pool.Config["ceph.cluster_name"] != "" {
-				clusterName = backupConf.Pool.Config["ceph.cluster_name"]
-			}
-
-			userName := "admin"
-			if backupConf.Pool.Config["ceph.user.name"] != "" {
-				userName = backupConf.Pool.Config["ceph.user.name"]
-			}
-
-			onDiskPoolName := backupConf.Pool.Config["ceph.osd.pool_name"]
-			snapName := fmt.Sprintf("snapshot_%s", od)
-			ret := cephContainerSnapshotDelete(clusterName, onDiskPoolName, project.Prefix(projectName, req.Name), storagePoolVolumeTypeNameContainer, snapName, userName)
-			if ret < 0 {
-				err = fmt.Errorf(`Failed to delete snapshot`)
-			}
-		case "zfs":
-			onDiskPoolName := backupConf.Pool.Config["zfs.pool_name"]
-			snapName := fmt.Sprintf("%s/%s", req.Name, od)
-			err = zfsSnapshotDeleteInternal(projectName, pool.Name(), snapName, onDiskPoolName)
-		}
-		if err != nil {
-			logger.Warnf(`Failed to delete snapshot`)
-		}
-	}
-
-	for _, snap := range backupConf.Snapshots {
-		switch backupConf.Pool.Driver {
-		case "btrfs":
-			snpMntPt := storagePools.GetSnapshotMountPoint(projectName, backupConf.Pool.Name, snap.Name)
-			if !shared.PathExists(snpMntPt) || !btrfsIsSubVolume(snpMntPt) {
-				if req.Force {
-					continue
-				}
-				return response.BadRequest(needForce)
-			}
-		case "dir":
-			snpMntPt := storagePools.GetSnapshotMountPoint(projectName, backupConf.Pool.Name, snap.Name)
-			if !shared.PathExists(snpMntPt) {
-				if req.Force {
-					continue
-				}
-				return response.BadRequest(needForce)
-			}
-		case "lvm":
-			ctName, csName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
-			ctLvmName := lvmNameToLVName(fmt.Sprintf("%s/%s", project.Prefix(projectName, ctName), csName))
-			ctLvName := lvmLVName(pool.Name(), storagePoolVolumeAPIEndpointContainers, ctLvmName)
-			exists, err := lvmLVExists(ctLvName)
-			if err != nil {
-				return response.InternalError(err)
-			}
-
-			if !exists {
-				if req.Force {
-					continue
-				}
-				return response.BadRequest(needForce)
-			}
-		case "ceph":
-			clusterName := "ceph"
-			if backupConf.Pool.Config["ceph.cluster_name"] != "" {
-				clusterName = backupConf.Pool.Config["ceph.cluster_name"]
-			}
-
-			userName := "admin"
-			if backupConf.Pool.Config["ceph.user.name"] != "" {
-				userName = backupConf.Pool.Config["ceph.user.name"]
-			}
-
-			onDiskPoolName := backupConf.Pool.Config["ceph.osd.pool_name"]
-			ctName, csName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
-			ctName = project.Prefix(projectName, ctName)
-			snapshotName := fmt.Sprintf("snapshot_%s", csName)
-
-			exists := cephRBDSnapshotExists(clusterName,
-				onDiskPoolName, ctName,
-				storagePoolVolumeTypeNameContainer,
-				snapshotName, userName)
-			if !exists {
-				if req.Force {
-					continue
-				}
-				return response.BadRequest(needForce)
-			}
-		case "zfs":
-			ctName, csName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
-			snapshotName := fmt.Sprintf("snapshot-%s", csName)
-
-			exists := zfsFilesystemEntityExists(pool.Name(), fmt.Sprintf("containers/%s@%s", project.Prefix(projectName, ctName), snapshotName))
-			if !exists {
-				if req.Force {
-					continue
-				}
-				return response.BadRequest(needForce)
-			}
+	// Check snapshots are consistent, and if not, if req.Force is true, then delete snapshots that do not exist in backup.yaml.
+	existingSnapshots, err := pool.CheckInstanceBackupFileSnapshots(backupConf, projectName, req.Force, nil)
+	if err != nil {
+		if errors.Cause(err) == storagePools.ErrBackupSnapshotsMismatch {
+			return response.InternalError(fmt.Errorf(`%s. Set "force" to discard non-existing snapshots`, err))
 		}
 
-		existingSnapshots = append(existingSnapshots, snap)
+		return response.InternalError(errors.Wrap(err, "Checking snapshots"))
 	}
 
 	// Check if a storage volume entry for the container already exists.
@@ -858,8 +625,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 	if backupConf.Container.Config["security.privileged"] == "" {
 		isPrivileged = true
 	}
-	err = storagePools.CreateContainerMountpoint(containerMntPoint, containerPath,
-		isPrivileged)
+	err = storagePools.CreateContainerMountpoint(containerMntPoint, containerPath, isPrivileged)
 	if err != nil {
 		return response.InternalError(err)
 	}
@@ -953,8 +719,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 
 		// Recreate missing mountpoints and symlinks.
-		snapshotMountPoint := storagePools.GetSnapshotMountPoint(projectName, backupConf.Pool.Name,
-			snap.Name)
+		snapshotMountPoint := storagePools.GetSnapshotMountPoint(projectName, backupConf.Pool.Name, snap.Name)
 		sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
 		sourceName = project.Prefix(projectName, sourceName)
 		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", backupConf.Pool.Name, "containers-snapshots", sourceName)

From e7e669ae5a2ee4df8fb473dbce858b5cfc65f1e1 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 12:55:36 +0000
Subject: [PATCH 11/36] lxd/storage/pool/interface: Adds
 CheckInstanceBackupFileSnapshots

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/pool_interface.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lxd/storage/pool_interface.go b/lxd/storage/pool_interface.go
index 40a995ed2b..1808b6a037 100644
--- a/lxd/storage/pool_interface.go
+++ b/lxd/storage/pool_interface.go
@@ -37,6 +37,7 @@ type Pool interface {
 	DeleteInstance(inst instance.Instance, op *operations.Operation) error
 	UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error
 	UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error
+	CheckInstanceBackupFileSnapshots(backupConf *backup.InstanceConfig, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error)
 
 	MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error
 	RefreshInstance(inst instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, op *operations.Operation) error

From 1f28f3872cc0c62f8ab69c9ad5f639765825e8e3 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 12:55:59 +0000
Subject: [PATCH 12/36] lxd/storage/errors: Adds ErrBackupSnapshotsMismatch
 error

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/errors.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lxd/storage/errors.go b/lxd/storage/errors.go
index 64df5f1579..b328c28f07 100644
--- a/lxd/storage/errors.go
+++ b/lxd/storage/errors.go
@@ -12,3 +12,6 @@ var ErrNotImplemented = fmt.Errorf("Not implemented")
 
 // ErrRunningQuotaResizeNotSupported is the "Running quota resize not supported" error.
 var ErrRunningQuotaResizeNotSupported = fmt.Errorf("Running quota resize not supported")
+
+// ErrBackupSnapshotsMismatch is the "Backup snapshots mismatch" error.
+var ErrBackupSnapshotsMismatch = fmt.Errorf("Backup snapshots mismatch")
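
The sentinel is wrapped with context where the mismatch is detected and
recovered by callers via errors.Cause, which is how patch 10 consumes it in
internalImport. A minimal sketch of the pattern with github.com/pkg/errors
(the wrapping library these patches already use); the checkSnapshots helper
is hypothetical:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// A sentinel error value, comparable by identity even after wrapping.
var ErrBackupSnapshotsMismatch = fmt.Errorf("Backup snapshots mismatch")

func checkSnapshots(onDisk, inConfig int) error {
	if onDisk != inConfig {
		// Wrap with context; errors.Cause can still recover the sentinel.
		return errors.Wrap(ErrBackupSnapshotsMismatch, "Snapshot count in backup config and storage device are different")
	}

	return nil
}

func main() {
	err := checkSnapshots(3, 2)
	if errors.Cause(err) == ErrBackupSnapshotsMismatch {
		fmt.Printf("%s. Set \"force\" to discard non-existing snapshots\n", err)
	}
}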

From 9194157ffd02b4da105f79cd79ab37d2c4a18239 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 12:56:15 +0000
Subject: [PATCH 13/36] lxd/storage/backend/mock: Adds
 CheckInstanceBackupFileSnapshots

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/backend_mock.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lxd/storage/backend_mock.go b/lxd/storage/backend_mock.go
index 90f15951ff..c54563ae4f 100644
--- a/lxd/storage/backend_mock.go
+++ b/lxd/storage/backend_mock.go
@@ -100,6 +100,10 @@ func (b *mockBackend) UpdateInstanceBackupFile(inst instance.Instance, op *opera
 	return nil
 }
 
+func (b *mockBackend) CheckInstanceBackupFileSnapshots(backupConf *backup.InstanceConfig, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error) {
+	return nil, nil
+}
+
 func (b *mockBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
 	return nil
 }

From 852b9be190f6505bc893a554255fb90d1a1c3980 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 12:56:35 +0000
Subject: [PATCH 14/36] lxd/storage/backend/lxd: Adds
 CheckInstanceBackupFileSnapshots implementation

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/backend_lxd.go | 97 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)

diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index b42036f22d..4925ad1119 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -2935,3 +2935,100 @@ func (b *lxdBackend) UpdateInstanceBackupFile(inst instance.Instance, op *operat
 
 	return err
 }
+
+// CheckInstanceBackupFileSnapshots compares the snapshots on the storage device to those defined in the backup
+// config supplied and returns an error if they do not match (when the deleteMissing argument is false).
+// If the deleteMissing argument is true, then any snapshots that exist on the storage device but not in the backup
+// config are removed from the storage device, and any snapshots that exist in the backup config but do not exist
+// on the storage device are ignored. The remaining set of snapshots that exist on both the storage device and the
+// backup config is returned. This set can be used to re-create the snapshot database entries when importing.
+func (b *lxdBackend) CheckInstanceBackupFileSnapshots(backupConf *backup.InstanceConfig, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error) {
+	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "instance": backupConf.Container.Name, "deleteMissing": deleteMissing})
+	logger.Debug("CheckInstanceBackupFileSnapshots started")
+	defer logger.Debug("CheckInstanceBackupFileSnapshots finished")
+
+	instType, err := instancetype.New(string(backupConf.Container.Type))
+	if err != nil {
+		return nil, err
+	}
+
+	volType, err := InstanceTypeToVolumeType(instType)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the volume name on storage.
+	volStorageName := project.Prefix(projectName, backupConf.Container.Name)
+
+	// We don't need to use the volume's config for mounting so set to nil.
+	vol := b.newVolume(volType, drivers.ContentTypeFS, volStorageName, nil)
+
+	// Get a list of snapshots that exist on storage device.
+	driverSnapshots, err := vol.Snapshots(op)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(backupConf.Snapshots) != len(driverSnapshots) {
+		if !deleteMissing {
+			return nil, errors.Wrap(ErrBackupSnapshotsMismatch, "Snapshot count in backup config and storage device are different")
+		}
+	}
+
+	// Delete snapshots that do not exist in backup config.
+	for _, driverSnapVol := range driverSnapshots {
+		_, driverSnapOnly, _ := shared.InstanceGetParentAndSnapshotName(driverSnapVol.Name())
+
+		inBackupFile := false
+		for _, backupFileSnap := range backupConf.Snapshots {
+			_, backupFileSnapOnly, _ := shared.InstanceGetParentAndSnapshotName(backupFileSnap.Name)
+			if driverSnapOnly == backupFileSnapOnly {
+				inBackupFile = true
+				break
+			}
+		}
+
+		if inBackupFile {
+			continue
+		}
+
+		if !deleteMissing {
+			return nil, errors.Wrapf(ErrBackupSnapshotsMismatch, "Snapshot %q exists on storage device but not in backup config", driverSnapOnly)
+		}
+
+		err = b.driver.DeleteVolumeSnapshot(driverSnapVol, op)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Failed to delete snapshot %q", driverSnapOnly)
+		}
+
+		logger.Debug("Deleted snapshot as not present in backup config", log.Ctx{"snapshot": driverSnapOnly})
+	}
+
+	// Check the snapshots in backup config exist on storage device.
+	existingSnapshots := []*api.InstanceSnapshot{}
+	for _, backupFileSnap := range backupConf.Snapshots {
+		_, backupFileSnapOnly, _ := shared.InstanceGetParentAndSnapshotName(backupFileSnap.Name)
+
+		onStorageDevice := false
+		for _, driverSnapVol := range driverSnapshots {
+			_, driverSnapOnly, _ := shared.InstanceGetParentAndSnapshotName(driverSnapVol.Name())
+			if driverSnapOnly == backupFileSnapOnly {
+				onStorageDevice = true
+				break
+			}
+		}
+
+		if !onStorageDevice {
+			if !deleteMissing {
+				return nil, errors.Wrapf(ErrBackupSnapshotsMismatch, "Snapshot %q exists in backup config but not on storage device", backupFileSnapOnly)
+			}
+
+			logger.Debug("Skipped snapshot in backup config as not present on storage device", log.Ctx{"snapshot": backupFileSnap})
+			continue // Skip snapshots missing on storage device.
+		}
+
+		existingSnapshots = append(existingSnapshots, backupFileSnap)
+	}
+
+	return existingSnapshots, nil
+}
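
The net effect is a two-way reconciliation: device-only snapshots are deleted
(or rejected when deleteMissing is false), config-only snapshots are skipped
(or rejected), and the intersection is returned for re-creating the database
entries. A simplified standalone model of that final intersection step, using
plain snapshot names in place of the real volume and API types:

package main

import "fmt"

// intersectSnapshots models the return value of the function above: only
// snapshots present both in the backup config and on the storage device
// survive.
func intersectSnapshots(configSnaps, deviceSnaps []string) []string {
	onDevice := make(map[string]bool, len(deviceSnaps))
	for _, name := range deviceSnaps {
		onDevice[name] = true
	}

	existing := []string{}
	for _, name := range configSnaps {
		if onDevice[name] {
			existing = append(existing, name)
		}
	}

	return existing
}

func main() {
	// Prints [snap1 snap2]: snap0 is missing on the device, snap3 is
	// missing from the backup config.
	fmt.Println(intersectSnapshots(
		[]string{"snap0", "snap1", "snap2"},
		[]string{"snap1", "snap2", "snap3"}))
}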

From 7edaa197f51400a45f87c607250dfd554d7fb6a8 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 13:34:29 +0000
Subject: [PATCH 15/36] lxd/patches/utils: Removes unused functions

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/patches_utils.go | 308 -------------------------------------------
 1 file changed, 308 deletions(-)

diff --git a/lxd/patches_utils.go b/lxd/patches_utils.go
index 959ebb87f4..c58f4b2007 100644
--- a/lxd/patches_utils.go
+++ b/lxd/patches_utils.go
@@ -11,49 +11,14 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/pborman/uuid"
-	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/lxd/state"
-	driver "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/units"
 )
 
-// For 'dir' storage backend.
-func dirSnapshotDeleteInternal(projectName, poolName string, snapshotName string) error {
-	snapshotContainerMntPoint := driver.GetSnapshotMountPoint(projectName, poolName, snapshotName)
-	if shared.PathExists(snapshotContainerMntPoint) {
-		err := os.RemoveAll(snapshotContainerMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	sourceContainerName, _, _ := shared.InstanceGetParentAndSnapshotName(snapshotName)
-	snapshotContainerPath := driver.GetSnapshotMountPoint(projectName, poolName, sourceContainerName)
-	empty, _ := shared.PathIsEmpty(snapshotContainerPath)
-	if empty == true {
-		err := os.Remove(snapshotContainerPath)
-		if err != nil {
-			return err
-		}
-
-		snapshotSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceContainerName))
-		if shared.PathExists(snapshotSymlink) {
-			err := os.Remove(snapshotSymlink)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
 // For 'btrfs' storage backend.
 func btrfsSubVolumeCreate(subvol string) error {
 	parentDestPath := filepath.Dir(subvol)
@@ -76,35 +41,6 @@ func btrfsSubVolumeCreate(subvol string) error {
 	return nil
 }
 
-func btrfsSnapshotDeleteInternal(projectName, poolName string, snapshotName string) error {
-	snapshotSubvolumeName := driver.GetSnapshotMountPoint(projectName, poolName, snapshotName)
-	// Also delete any leftover .ro snapshot.
-	roSnapshotSubvolumeName := fmt.Sprintf("%s.ro", snapshotSubvolumeName)
-	names := []string{snapshotSubvolumeName, roSnapshotSubvolumeName}
-	for _, name := range names {
-		if shared.PathExists(name) && btrfsIsSubVolume(name) {
-			err := btrfsSubVolumesDelete(name)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	sourceSnapshotMntPoint := shared.VarPath("snapshots", project.Prefix(projectName, snapshotName))
-	os.Remove(sourceSnapshotMntPoint)
-	os.Remove(snapshotSubvolumeName)
-
-	sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(snapshotName)
-	snapshotSubvolumePath := driver.GetSnapshotMountPoint(projectName, poolName, sourceName)
-	os.Remove(snapshotSubvolumePath)
-	if !shared.PathExists(snapshotSubvolumePath) {
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceName))
-		os.Remove(snapshotMntPointSymlink)
-	}
-
-	return nil
-}
-
 func btrfsSubVolumeQGroup(subvol string) (string, error) {
 	output, err := shared.RunCommand(
 		"btrfs",
@@ -288,187 +224,6 @@ func btrfsSubVolumesGet(path string) ([]string, error) {
 	return result, nil
 }
 
-// For 'zfs' storage backend.
-func zfsPoolListSnapshots(pool string, path string) ([]string, error) {
-	path = strings.TrimRight(path, "/")
-	fullPath := pool
-	if path != "" {
-		fullPath = fmt.Sprintf("%s/%s", pool, path)
-	}
-
-	output, err := shared.RunCommand("zfs", "list", "-t", "snapshot", "-o", "name", "-H", "-d", "1", "-s", "creation", "-r", fullPath)
-	if err != nil {
-		return []string{}, errors.Wrap(err, "Failed to list ZFS snapshots")
-	}
-
-	children := []string{}
-	for _, entry := range strings.Split(output, "\n") {
-		if entry == "" {
-			continue
-		}
-
-		if entry == fullPath {
-			continue
-		}
-
-		children = append(children, strings.SplitN(entry, "@", 2)[1])
-	}
-
-	return children, nil
-}
-
-func zfsSnapshotDeleteInternal(projectName, poolName string, ctName string, onDiskPoolName string) error {
-	sourceContainerName, sourceContainerSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(ctName)
-	snapName := fmt.Sprintf("snapshot-%s", sourceContainerSnapOnlyName)
-
-	if zfsFilesystemEntityExists(onDiskPoolName,
-		fmt.Sprintf("containers/%s@%s",
-			project.Prefix(projectName, sourceContainerName), snapName)) {
-		removable, err := zfsPoolVolumeSnapshotRemovable(onDiskPoolName,
-			fmt.Sprintf("containers/%s",
-				project.Prefix(projectName, sourceContainerName)),
-			snapName)
-		if err != nil {
-			return err
-		}
-
-		if removable {
-			err = zfsPoolVolumeSnapshotDestroy(onDiskPoolName,
-				fmt.Sprintf("containers/%s",
-					project.Prefix(projectName, sourceContainerName)),
-				snapName)
-		} else {
-			err = zfsPoolVolumeSnapshotRename(onDiskPoolName,
-				fmt.Sprintf("containers/%s",
-					project.Prefix(projectName, sourceContainerName)),
-				snapName,
-				fmt.Sprintf("copy-%s", uuid.NewRandom().String()))
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	// Delete the snapshot on its storage pool:
-	// ${POOL}/snapshots/<snapshot_name>
-	snapshotContainerMntPoint := driver.GetSnapshotMountPoint(projectName, poolName, ctName)
-	if shared.PathExists(snapshotContainerMntPoint) {
-		err := os.RemoveAll(snapshotContainerMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Check if we can remove the snapshot symlink:
-	// ${LXD_DIR}/snapshots/<container_name> to ${POOL}/snapshots/<container_name>
-	// by checking if the directory is empty.
-	snapshotContainerPath := driver.GetSnapshotMountPoint(projectName, poolName, sourceContainerName)
-	empty, _ := shared.PathIsEmpty(snapshotContainerPath)
-	if empty == true {
-		// Remove the snapshot directory for the container:
-		// ${POOL}/snapshots/<source_container_name>
-		err := os.Remove(snapshotContainerPath)
-		if err != nil {
-			return err
-		}
-
-		snapshotSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceContainerName))
-		if shared.PathExists(snapshotSymlink) {
-			err := os.Remove(snapshotSymlink)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Legacy
-	snapPath := shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", project.Prefix(projectName, sourceContainerName), sourceContainerSnapOnlyName))
-	if shared.PathExists(snapPath) {
-		err := os.Remove(snapPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Legacy
-	parent := shared.VarPath(fmt.Sprintf("snapshots/%s", project.Prefix(projectName, sourceContainerName)))
-	if ok, _ := shared.PathIsEmpty(parent); ok {
-		err := os.Remove(parent)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func zfsFilesystemEntityExists(pool string, path string) bool {
-	vdev := pool
-	if path != "" {
-		vdev = fmt.Sprintf("%s/%s", pool, path)
-	}
-
-	output, err := shared.RunCommand("zfs", "get", "-H", "-o", "name", "type", vdev)
-	if err != nil {
-		return false
-	}
-
-	detectedName := strings.TrimSpace(output)
-	return detectedName == vdev
-}
-
-func zfsPoolVolumeSnapshotRemovable(pool string, path string, name string) (bool, error) {
-	var snap string
-	if name == "" {
-		snap = path
-	} else {
-		snap = fmt.Sprintf("%s@%s", path, name)
-	}
-
-	clones, err := zfsFilesystemEntityPropertyGet(pool, snap, "clones")
-	if err != nil {
-		return false, err
-	}
-
-	if clones == "-" || clones == "" {
-		return true, nil
-	}
-
-	return false, nil
-}
-
-func zfsFilesystemEntityPropertyGet(pool string, path string, key string) (string, error) {
-	entity := pool
-	if path != "" {
-		entity = fmt.Sprintf("%s/%s", pool, path)
-	}
-
-	output, err := shared.RunCommand("zfs", "get", "-H", "-p", "-o", "value", key, entity)
-	if err != nil {
-		return "", errors.Wrap(err, "Failed to get ZFS config")
-	}
-
-	return strings.TrimRight(output, "\n"), nil
-}
-
-func zfsPoolVolumeSnapshotDestroy(pool, path string, name string) error {
-	_, err := shared.RunCommand("zfs", "destroy", "-r", fmt.Sprintf("%s/%s@%s", pool, path, name))
-	if err != nil {
-		return errors.Wrap(err, "Failed to destroy ZFS snapshot")
-	}
-
-	return nil
-}
-
-func zfsPoolVolumeSnapshotRename(pool string, path string, oldName string, newName string) error {
-	_, err := shared.RunCommand("zfs", "rename", "-r", fmt.Sprintf("%s/%s@%s", pool, path, oldName), fmt.Sprintf("%s/%s@%s", pool, path, newName))
-	if err != nil {
-		return errors.Wrap(err, "Failed to rename ZFS snapshot")
-	}
-
-	return nil
-}
-
 // For 'lvm' storage backend.
 func lvmLVRename(vgName string, oldName string, newName string) error {
 	_, err := shared.TryRunCommand("lvrename", vgName, oldName, newName)
@@ -540,66 +295,3 @@ func lvmGetLVSize(lvPath string) (string, error) {
 
 	return detectedSize, nil
 }
-
-func lvmLVName(lvmPool string, volumeType string, lvmVolume string) string {
-	if volumeType == "" {
-		return fmt.Sprintf("%s/%s", lvmPool, lvmVolume)
-	}
-
-	return fmt.Sprintf("%s/%s_%s", lvmPool, volumeType, lvmVolume)
-}
-
-func lvmContainerDeleteInternal(projectName, poolName string, ctName string, isSnapshot bool, vgName string, ctPath string) error {
-	containerMntPoint := ""
-	containerLvmName := lvmNameToLVName(ctName)
-	if isSnapshot {
-		containerMntPoint = driver.GetSnapshotMountPoint(projectName, poolName, ctName)
-	} else {
-		containerMntPoint = driver.GetContainerMountPoint(projectName, poolName, ctName)
-	}
-
-	if shared.IsMountPoint(containerMntPoint) {
-		err := storageDrivers.TryUnmount(containerMntPoint, 0)
-		if err != nil {
-			return fmt.Errorf(`Failed to unmount container path `+
-				`"%s": %s`, containerMntPoint, err)
-		}
-	}
-
-	containerLvmDevPath := lvmDevPath(projectName, vgName,
-		storagePoolVolumeAPIEndpointContainers, containerLvmName)
-
-	lvExists, _ := lvmLVExists(containerLvmDevPath)
-	if lvExists {
-		err := lvmRemoveLV(projectName, vgName, storagePoolVolumeAPIEndpointContainers, containerLvmName)
-		if err != nil {
-			return err
-		}
-	}
-
-	var err error
-	if isSnapshot {
-		sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(ctName)
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", poolName, "containers-snapshots", project.Prefix(projectName, sourceName))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceName))
-		err = deleteSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-	} else {
-		err = deleteContainerMountpoint(containerMntPoint, ctPath, "lvm")
-	}
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func lvmRemoveLV(project, vgName string, volumeType string, lvName string) error {
-	lvmVolumePath := lvmDevPath(project, vgName, volumeType, lvName)
-
-	_, err := shared.TryRunCommand("lvremove", "-f", lvmVolumePath)
-	if err != nil {
-		return fmt.Errorf("Could not remove LV named %s: %v", lvName, err)
-	}
-
-	return nil
-}

From b3863f1bc64d401b1e4fd30a02f090f78ee9c10a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 13:42:48 +0000
Subject: [PATCH 16/36] lxd/api/internal: Adds sanity check for instance name
 in internalImport

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/api_internal.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 8eedb78c74..9d1dab7c1a 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -459,6 +459,10 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		return response.SmartError(err)
 	}
 
+	if req.Name != backupConf.Container.Name {
+		return response.InternalError(fmt.Errorf("Instance name in request %q doesn't match instance name in backup config %q", req.Name, backupConf.Container.Name))
+	}
+
 	// Update snapshot names to include container name (if needed).
 	for i, snap := range backupConf.Snapshots {
 		if !strings.Contains(snap.Name, "/") {

From d782daf202cda0de0a88886e21aa056775024139 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 13:31:40 +0000
Subject: [PATCH 17/36] lxd/storage/drivers/driver/lvm/volumes: Updates
 VolumeSnapshots to use lvs for snapshot list

Rather than listing the directories under /var/lib/lxd/storage/<pool>/<volume type>s-snapshots to build snapshot lists, this switches to using the lvs command to output the logical volumes that match the prefix used for volume snapshots.

This way, if the volume has snapshots on the LVM storage layer but its snapshot directory has been removed, they are still returned in the list.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/driver_lvm_volumes.go | 50 +++++++++++++++++++++--
 1 file changed, 47 insertions(+), 3 deletions(-)

diff --git a/lxd/storage/drivers/driver_lvm_volumes.go b/lxd/storage/drivers/driver_lvm_volumes.go
index d55449964e..016d163e73 100644
--- a/lxd/storage/drivers/driver_lvm_volumes.go
+++ b/lxd/storage/drivers/driver_lvm_volumes.go
@@ -1,10 +1,14 @@
 package drivers
 
 import (
+	"bufio"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"math"
 	"os"
+	"os/exec"
+	"strings"
 
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
@@ -728,10 +732,50 @@ func (d *lvm) UnmountVolumeSnapshot(snapVol Volume, op *operations.Operation) (b
 
 // VolumeSnapshots returns a list of snapshots for the volume.
 func (d *lvm) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) {
-	// We use the vfsVolumeSnapshots rather than inspecting the logical volumes themselves because the origin
+	fullVolName := d.lvmFullVolumeName(vol.volType, vol.contentType, vol.name)
+
+	// We use the volume list rather than inspecting the logical volumes themselves because the origin
 	// property of an LVM snapshot can be removed/changed when restoring snapshots, such that they are not
-	// marked as origin of the parent volume.
-	return genericVFSVolumeSnapshots(d, vol, op)
+	// marked as origin of the parent volume. Instead we use prefix matching on the volume names to find the
+	// snapshot volumes.
+	cmd := exec.Command("lvs", "--noheadings", "-o", "lv_name", d.config["lvm.vg_name"])
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	err = cmd.Start()
+	if err != nil {
+		return nil, err
+	}
+
+	snapshots := []string{}
+	scanner := bufio.NewScanner(stdout)
+	prefix := fmt.Sprintf("%s-", fullVolName)
+	for scanner.Scan() {
+		snapLine := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(snapLine, prefix) {
+			// Remove volume name prefix (including snapshot delimiter) and unescape snapshot name.
+			snapshots = append(snapshots, strings.Replace(strings.TrimPrefix(snapLine, prefix), "--", "-", -1))
+		}
+	}
+
+	errMsg, err := ioutil.ReadAll(stderr)
+	if err != nil {
+		return nil, err
+	}
+
+	err = cmd.Wait()
+	if err != nil {
+		return nil, errors.Wrapf(err, "Failed to get snapshot list for volume %q: %v", fullVolName, strings.TrimSpace(string(errMsg)))
+	}
+
+	return snapshots, nil
 }
 
 // RestoreVolume restores a volume from a snapshot.
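
Two details in that hunk are easy to miss: snapshot LVs are named
<volume>-<snapshot>, and literal hyphens inside a snapshot name are escaped
as "--" on the LVM layer, which is why a plain prefix test followed by an
unescape is enough. A standalone sketch of that parsing step over
hypothetical lvs output (the exact volume naming here is illustrative):

package main

import (
	"fmt"
	"strings"
)

// parseSnapshotNames mirrors the prefix matching and unescaping above:
// keep lines starting with "<volume>-", strip the prefix, then turn the
// escaped "--" back into "-".
func parseSnapshotNames(lvsOutput, fullVolName string) []string {
	prefix := fullVolName + "-"
	snapshots := []string{}
	for _, line := range strings.Split(lvsOutput, "\n") {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, prefix) {
			snapshots = append(snapshots, strings.Replace(strings.TrimPrefix(line, prefix), "--", "-", -1))
		}
	}

	return snapshots
}

func main() {
	// Hypothetical "lvs --noheadings -o lv_name" output.
	out := `
  containers_c1
  containers_c1-snap0
  containers_c1-snap--0
  images_abcdef
`
	// Prints [snap0 snap-0].
	fmt.Println(parseSnapshotNames(out, "containers_c1"))
}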

From 268df91fdc07d511aa2586c315421edb2bd9a70b Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 16:13:39 +0000
Subject: [PATCH 18/36] lxd/backup: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/backup.go | 46 +++++++++++-----------------------------------
 1 file changed, 11 insertions(+), 35 deletions(-)

diff --git a/lxd/backup.go b/lxd/backup.go
index 64c5e5b022..6583addc43 100644
--- a/lxd/backup.go
+++ b/lxd/backup.go
@@ -20,7 +20,6 @@ import (
 	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/lxd/state"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/lxd/task"
 	"github.com/lxc/lxd/shared"
 	log "github.com/lxc/lxd/shared/log15"
@@ -65,31 +64,13 @@ func backupCreate(s *state.State, args db.InstanceBackupArgs, sourceInst instanc
 
 	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(s, sourceInst)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return errors.Wrap(err, "Load instance storage pool")
-		}
-
-		err = pool.BackupInstance(sourceInst, tmpPath, b.OptimizedStorage(), !b.InstanceOnly(), nil)
-		if err != nil {
-			return errors.Wrap(err, "Backup create")
-		}
-	} else if sourceInst.Type() == instancetype.Container {
-		ourStart, err := sourceInst.StorageStart()
-		if err != nil {
-			return err
-		}
-		if ourStart {
-			defer sourceInst.StorageStop()
-		}
+	if err != nil {
+		return errors.Wrap(err, "Load instance storage pool")
+	}
 
-		ct := sourceInst.(*containerLXC)
-		err = ct.Storage().ContainerBackupCreate(tmpPath, *b, sourceInst)
-		if err != nil {
-			return errors.Wrap(err, "Backup create")
-		}
-	} else {
-		return fmt.Errorf("Instance type not supported")
+	err = pool.BackupInstance(sourceInst, tmpPath, b.OptimizedStorage(), !b.InstanceOnly(), nil)
+	if err != nil {
+		return errors.Wrap(err, "Backup create")
 	}
 
 	// Pack the backup.
@@ -121,18 +102,13 @@ func backupCreateTarball(s *state.State, path string, b backup.Backup, c instanc
 	}
 
 	pool, err := storagePools.GetPoolByInstance(s, c)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented && err != db.ErrNoSuchObject {
-		if err != nil {
-			return err
-		}
-
-		info := pool.Driver().Info()
-		indexFile.Backend = info.Name
-	} else {
-		ct := c.(*containerLXC)
-		indexFile.Backend = ct.Storage().GetStorageTypeName()
+	if err != nil {
+		return err
 	}
 
+	info := pool.Driver().Info()
+	indexFile.Backend = info.Name
+
 	if !b.InstanceOnly() {
 		snaps, err := c.Snapshots()
 		if err != nil {

From b39faa7d6619293771edd851b145dc73435f8364 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 16:25:50 +0000
Subject: [PATCH 19/36] lxd/container/lxc: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_lxc.go | 315 +++++++++----------------------------------
 1 file changed, 64 insertions(+), 251 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index f44b368301..26f1253331 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -249,26 +249,17 @@ func containerLXCCreate(s *state.State, args db.InstanceArgs) (instance.Instance
 		return nil, err
 	}
 
-	// Initialize the container storage
-	// Check if we can load new storage layer for pool driver type.
+	// Initialize the container storage.
 	pool, err := storagePools.GetPoolByInstance(c.state, c)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return nil, err
-		}
-		c.storagePool = pool
-	} else {
-		// Fallback to legacy storage layer.
-		cStorage, err := storagePoolVolumeContainerCreateInit(s, args.Project, storagePool, args.Name)
-		if err != nil {
-			c.Delete()
-			s.Cluster.StoragePoolVolumeDelete(args.Project, args.Name, storagePoolVolumeTypeContainer, poolID)
-			logger.Error("Failed to initialize container storage", ctxMap)
-			return nil, err
-		}
-		c.storage = cStorage
+	if err != nil {
+		c.Delete()
+		s.Cluster.StoragePoolVolumeDelete(args.Project, args.Name, storagePoolVolumeTypeContainer, poolID)
+		logger.Error("Failed to initialize container storage", ctxMap)
+		return nil, err
 	}
 
+	c.storagePool = pool
+
 	// Setup initial idmap config
 	var idmap *idmap.IdmapSet
 	base := int64(0)
@@ -3220,34 +3211,18 @@ func (c *containerLXC) Restore(sourceContainer instance.Instance, stateful bool)
 	var ctxMap log.Ctx
 
 	// Initialize storage interface for the container and mount the rootfs for criu state check.
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(c.state, c)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return err
-		}
-
-		// Ensure that storage is mounted for state path checks and for backup.yaml updates.
-		ourStart, err := pool.MountInstance(c, nil)
-		if err != nil {
-			return err
-		}
-		if ourStart {
-			defer pool.UnmountInstance(c, nil)
-		}
-	} else {
-		err = c.initStorage()
-		if err != nil {
-			return err
-		}
+	if err != nil {
+		return err
+	}
 
-		ourStart, err := c.mount()
-		if err != nil {
-			return err
-		}
-		if ourStart {
-			defer c.unmount()
-		}
+	// Ensure that storage is mounted for state path checks and for backup.yaml updates.
+	ourStart, err := pool.MountInstance(c, nil)
+	if err != nil {
+		return err
+	}
+	if ourStart {
+		defer pool.UnmountInstance(c, nil)
 	}
 
 	// Check for CRIU if necessary, before doing a bunch of filesystem manipulations.
@@ -3454,13 +3429,10 @@ func (c *containerLXC) Delete() error {
 		return err
 	}
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(c.state, c)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented && err != db.ErrNoSuchObject {
-		if err != nil {
-			return err
-		}
-
+	if err != nil && err != db.ErrNoSuchObject {
+		return err
+	} else if err != db.ErrNoSuchObject {
 		// Check if we're dealing with "lxd import".
 		// "lxd import" is used for disaster recovery, where you already have a container
 		// and snapshots on disk but no DB entry. As such if something has gone wrong during
@@ -3498,66 +3470,6 @@ func (c *containerLXC) Delete() error {
 				}
 			}
 		}
-	} else if err != db.ErrNoSuchObject {
-		// Attempt to initialize storage interface for the container.
-		err := c.initStorage()
-		if err != nil {
-			logger.Warnf("Failed to init storage: %v", err)
-		}
-
-		// Get the name and ID of the storage pool the container is attached to. This
-		// reverse-engineering works because container names are globally unique.
-		poolID, poolName, _ := c.storage.GetContainerPoolInfo()
-
-		// Check if we're dealing with "lxd import".
-		// "lxd import" is used for disaster recovery, where you already have a container
-		// and snapshots on disk but no DB entry. As such if something has gone wrong during
-		// the creation of the instance and we are now being asked to delete the instance,
-		// we should not remove the storage volumes themselves as this would cause data loss.
-		isImport := false
-		cName, _, _ := shared.InstanceGetParentAndSnapshotName(c.Name())
-		importingFilePath := storagePools.InstanceImportingFilePath(c.Type(), poolName, c.Project(), cName)
-		if shared.PathExists(importingFilePath) {
-			isImport = true
-		}
-
-		if c.IsSnapshot() {
-			// Remove the snapshot.
-			if c.storage != nil && !isImport {
-				err := c.storage.ContainerSnapshotDelete(c)
-				if err != nil {
-					logger.Warn("Failed to delete snapshot", log.Ctx{"name": c.Name(), "err": err})
-					return err
-				}
-			}
-		} else {
-			// Remove all snapshots.
-			err := instance.DeleteSnapshots(c.state, c.Project(), c.Name())
-			if err != nil {
-				logger.Warn("Failed to delete snapshots", log.Ctx{"name": c.Name(), "err": err})
-				return err
-			}
-
-			// Delete the container from disk.
-			if c.storage != nil && !isImport {
-				_, poolName, _ := c.storage.GetContainerPoolInfo()
-				containerMountPoint := storagePools.GetContainerMountPoint(c.Project(), poolName, c.Name())
-				if shared.PathExists(c.Path()) ||
-					shared.PathExists(containerMountPoint) {
-					err := c.storage.ContainerDelete(c)
-					if err != nil {
-						logger.Error("Failed deleting container storage", log.Ctx{"name": c.Name(), "err": err})
-						return err
-					}
-				}
-			}
-		}
-
-		// Remove volume from storage pool.
-		err = c.state.Cluster.StoragePoolVolumeDelete(c.Project(), c.Name(), storagePoolVolumeTypeContainer, poolID)
-		if err != nil {
-			return err
-		}
 	}
 
 	// Perform other cleanup steps if not snapshot.
@@ -3640,61 +3552,22 @@ func (c *containerLXC) Rename(newName string) error {
 	// Clean things up.
 	c.cleanup()
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(c.state, c)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return errors.Wrap(err, "Load instance storage pool")
-		}
+	if err != nil {
+		return errors.Wrap(err, "Load instance storage pool")
+	}
 
-		if c.IsSnapshot() {
-			_, newSnapName, _ := shared.InstanceGetParentAndSnapshotName(newName)
-			err = pool.RenameInstanceSnapshot(c, newSnapName, nil)
-			if err != nil {
-				return errors.Wrap(err, "Rename instance snapshot")
-			}
-		} else {
-			err = pool.RenameInstance(c, newName, nil)
-			if err != nil {
-				return errors.Wrap(err, "Rename instance")
-			}
-		}
-	} else if c.Type() == instancetype.Container {
-		// Initialize storage interface for the container.
-		err = c.initStorage()
+	if c.IsSnapshot() {
+		_, newSnapName, _ := shared.InstanceGetParentAndSnapshotName(newName)
+		err = pool.RenameInstanceSnapshot(c, newSnapName, nil)
 		if err != nil {
-			return err
+			return errors.Wrap(err, "Rename instance snapshot")
 		}
-
-		// Rename the storage entry.
-		if c.IsSnapshot() {
-			err := c.storage.ContainerSnapshotRename(c, newName)
-			if err != nil {
-				logger.Error("Failed renaming container", ctxMap)
-				return err
-			}
-		} else {
-			err := c.storage.ContainerRename(c, newName)
-			if err != nil {
-				logger.Error("Failed renaming container", ctxMap)
-				return err
-			}
-		}
-
-		poolID, _, _ := c.storage.GetContainerPoolInfo()
-
-		// Rename storage volume for the container.
-		err = c.state.Cluster.StoragePoolVolumeRename(c.project, oldName, newName, storagePoolVolumeTypeContainer, poolID)
+	} else {
+		err = pool.RenameInstance(c, newName, nil)
 		if err != nil {
-			logger.Error("Failed renaming storage volume", ctxMap)
-			return err
+			return errors.Wrap(err, "Rename instance")
 		}
-
-		// Update the storage volume name in the storage interface.
-		sNew := c.storage.GetStoragePoolVolumeWritable()
-		c.storage.SetStoragePoolVolumeWritable(&sNew)
-	} else {
-		return fmt.Errorf("Instance type not supported")
 	}
 
 	if !c.IsSnapshot() {
@@ -4951,18 +4824,13 @@ func (c *containerLXC) Migrate(args *CriuMigrationArgs) error {
 		logger.Warn("Unknown migrate call", log.Ctx{"cmd": args.cmd})
 	}
 
-	var preservesInodes bool
 	pool, err := c.getStoragePool()
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return err
-		}
-
-		preservesInodes = pool.Driver().Info().PreservesInodes
-	} else {
-		preservesInodes = c.storage.PreservesInodes()
+	if err != nil {
+		return err
 	}
 
+	preservesInodes := pool.Driver().Info().PreservesInodes
+
 	/* This feature was only added in 2.0.1, let's not ask for it
 	 * before then or migrations will fail.
 	 */
@@ -5816,26 +5684,18 @@ func (c *containerLXC) diskState() map[string]api.InstanceStateDisk {
 
 		var usage int64
 
-		// Check if we can load new storage layer for pool driver type.
 		pool, err := storagePools.GetPoolByInstance(c.state, c)
-		if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-			if err != nil {
-				logger.Error("Error loading storage pool", log.Ctx{"project": c.Project(), "instance": c.Name(), "err": err})
-				continue
-			}
+		if err != nil {
+			logger.Error("Error loading storage pool", log.Ctx{"project": c.Project(), "instance": c.Name(), "err": err})
+			continue
+		}
 
-			usage, err = pool.GetInstanceUsage(c)
-			if err != nil {
-				if err != storageDrivers.ErrNotSupported {
-					logger.Error("Error getting disk usage", log.Ctx{"project": c.Project(), "instance": c.Name(), "err": err})
-				}
-				continue
-			}
-		} else {
-			usage, err = c.storage.ContainerGetUsage(c)
-			if err != nil {
-				continue
+		usage, err = pool.GetInstanceUsage(c)
+		if err != nil {
+			if err != storageDrivers.ErrNotSupported {
+				logger.Error("Error getting disk usage", log.Ctx{"project": c.Project(), "instance": c.Name(), "err": err})
 			}
+			continue
 		}
 
 		disk[dev.Name] = api.InstanceStateDisk{Usage: usage}
@@ -6033,17 +5893,12 @@ func (c *containerLXC) getStoragePool() (storagePools.Pool, error) {
 
 // getStorageType returns the storage type of the instance's storage pool.
 func (c *containerLXC) getStorageType() (string, error) {
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := c.getStoragePool()
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return "", err
-		}
-
-		return pool.Driver().Info().Name, nil
+	if err != nil {
+		return "", err
 	}
 
-	return storageTypeToString(c.legacyStorage().GetStorageType())
+	return pool.Driver().Info().Name, nil
 }
 
 // StorageStart mounts the instance's rootfs volume. Deprecated.
@@ -6053,23 +5908,13 @@ func (c *containerLXC) StorageStart() (bool, error) {
 
 // mount the instance's rootfs volume if needed.
 func (c *containerLXC) mount() (bool, error) {
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := c.getStoragePool()
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return false, err
-		}
-
-		if c.IsSnapshot() {
-			ourMount, err := pool.MountInstanceSnapshot(c, nil)
-			if err != nil {
-				return false, err
-			}
-
-			return ourMount, nil
-		}
+	if err != nil {
+		return false, err
+	}
 
-		ourMount, err := pool.MountInstance(c, nil)
+	if c.IsSnapshot() {
+		ourMount, err := pool.MountInstanceSnapshot(c, nil)
 		if err != nil {
 			return false, err
 		}
@@ -6077,20 +5922,12 @@ func (c *containerLXC) mount() (bool, error) {
 		return ourMount, nil
 	}
 
-	// Initialize storage interface for the container.
-	err = c.initStorage()
+	ourMount, err := pool.MountInstance(c, nil)
 	if err != nil {
 		return false, err
 	}
 
-	var isOurOperation bool
-	if c.IsSnapshot() {
-		isOurOperation, err = c.storage.ContainerSnapshotStart(c)
-	} else {
-		isOurOperation, err = c.storage.ContainerMount(c)
-	}
-
-	return isOurOperation, err
+	return ourMount, nil
 }
 
 // StorageStop unmounts the instance's rootfs volume. Deprecated.
@@ -6100,23 +5937,13 @@ func (c *containerLXC) StorageStop() (bool, error) {
 
 // unmount the instance's rootfs volume if needed.
 func (c *containerLXC) unmount() (bool, error) {
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := c.getStoragePool()
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return false, err
-		}
-
-		if c.IsSnapshot() {
-			unmounted, err := pool.UnmountInstanceSnapshot(c, nil)
-			if err != nil {
-				return false, err
-			}
-
-			return unmounted, nil
-		}
+	if err != nil {
+		return false, err
+	}
 
-		unmounted, err := pool.UnmountInstance(c, nil)
+	if c.IsSnapshot() {
+		unmounted, err := pool.UnmountInstanceSnapshot(c, nil)
 		if err != nil {
 			return false, err
 		}
@@ -6124,20 +5951,12 @@ func (c *containerLXC) unmount() (bool, error) {
 		return unmounted, nil
 	}
 
-	// Initialize legacy storage interface for the container.
-	err = c.initStorage()
+	unmounted, err := pool.UnmountInstance(c, nil)
 	if err != nil {
 		return false, err
 	}
 
-	var isOurOperation bool
-	if c.IsSnapshot() {
-		isOurOperation, err = c.storage.ContainerSnapshotStop(c)
-	} else {
-		isOurOperation, err = c.storage.ContainerUmount(c, c.Path())
-	}
-
-	return isOurOperation, err
+	return unmounted, nil
 }
 
 // Mount handling
@@ -7050,16 +6869,10 @@ func (rw *lxcCgroupReadWriter) Set(version cgroup.Backend, controller string, ke
 
 // UpdateBackupFile writes the instance's backup.yaml file to storage.
 func (c *containerLXC) UpdateBackupFile() error {
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := c.getStoragePool()
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return err
-		}
-
-		return pool.UpdateInstanceBackupFile(c, nil)
+	if err != nil {
+		return err
 	}
 
-	// Fallback to legacy backup function for old storage drivers.
-	return instance.WriteBackupFile(c.state, c)
+	return pool.UpdateInstanceBackupFile(c, nil)
 }

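A note on the pattern this patch applies at every call site above: the old code treated storageDrivers.ErrUnknownDriver and storageDrivers.ErrNotImplemented as "fall back to the legacy layer" signals, which forced a nested error check around each pool lookup. With the legacy layer gone, a pool either loads or the error is fatal, so the nesting flattens out. A minimal standalone sketch of the before/after control flow (stub types and a stub sentinel, not the real LXD APIs):

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the real storage types; names here are illustrative only.
var errUnknownDriver = errors.New("unknown driver")

type pool struct{ name string }

func getPoolByInstance(name string) (*pool, error) {
	return &pool{name: name}, nil
}

// Old shape: sentinel errors selected the legacy fallback path.
func mountOld(name string) error {
	p, err := getPoolByInstance(name)
	if err != errUnknownDriver {
		if err != nil {
			return err
		}
		fmt.Println("new layer mount on", p.name)
		return nil
	}
	// Legacy fallback went here.
	return nil
}

// New shape: any error is fatal and one level of nesting disappears.
func mountNew(name string) error {
	p, err := getPoolByInstance(name)
	if err != nil {
		return err
	}
	fmt.Println("pool mount on", p.name)
	return nil
}

func main() {
	_ = mountOld("c1")
	_ = mountNew("c1")
}

The same flattening repeats above in Restore, Delete, Rename, Migrate, diskState, mount/unmount and UpdateBackupFile.
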
From 543137047cb061631d868334658f6c01c4d9e7a6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 16:43:12 +0000
Subject: [PATCH 20/36] lxd/storage: Removes unused
 storagePoolVolumeContainerCreateInit

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 89f02e758b..45ea9fa6d5 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -479,10 +479,6 @@ func storagePoolVolumeImageInit(s *state.State, poolName string, imageFingerprin
 	return storagePoolVolumeInit(s, "default", poolName, imageFingerprint, storagePoolVolumeTypeImage)
 }
 
-func storagePoolVolumeContainerCreateInit(s *state.State, project string, poolName string, containerName string) (storage, error) {
-	return storagePoolVolumeInit(s, project, poolName, containerName, storagePoolVolumeTypeContainer)
-}
-
 func storagePoolVolumeContainerLoadInit(s *state.State, project, containerName string) (storage, error) {
 	// Get the storage pool of a given container.
 	poolName, err := s.Cluster.InstancePool(project, containerName)

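With the legacy fallback in containerLXCCreate gone, storagePoolVolumeContainerCreateInit had no remaining callers. The Go compiler does not reject unused package-level functions (only unused imports and local variables are errors), so dead wrappers like this are normally found with a linter. A tiny sketch of the kind of code such tools flag; naming staticcheck's U1000 check here is an assumption about tooling, not something this series depends on:

package main

// Go compiles unused package-level functions without complaint; only
// unused imports and local variables are compile errors. Linters such
// as staticcheck (its "unused" check, U1000) report unexported dead
// functions like this one once the last caller is deleted.
func orphanedHelper() int {
	return 42
}

func main() {}
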
From 7b2c260315746a321136992b1120462b7e07338d Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 16:56:00 +0000
Subject: [PATCH 21/36] lxd/container: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 294 +++++++++--------------------------------------
 1 file changed, 53 insertions(+), 241 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index f0d4b8d549..028efcaaa9 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -23,17 +23,13 @@ import (
 	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/project"
 	"github.com/lxc/lxd/lxd/state"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/lxd/task"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/ioprogress"
 	log "github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/osarch"
-	"github.com/lxc/lxd/shared/units"
 )
 
 // Helper functions
@@ -55,27 +51,14 @@ func instanceCreateAsEmpty(d *Daemon, args db.InstanceArgs) (instance.Instance,
 		inst.Delete()
 	}()
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(d.State(), inst)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return nil, errors.Wrap(err, "Load instance storage pool")
-		}
-
-		err = pool.CreateInstance(inst, nil)
-		if err != nil {
-			return nil, errors.Wrap(err, "Create instance")
-		}
-	} else if inst.Type() == instancetype.Container {
-		ct := inst.(*containerLXC)
+	if err != nil {
+		return nil, errors.Wrap(err, "Load instance storage pool")
+	}
 
-		// Now create the empty storage.
-		err = ct.Storage().ContainerCreate(inst)
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		return nil, fmt.Errorf("Instance type not supported")
+	err = pool.CreateInstance(inst, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "Create instance")
 	}
 
 	// Apply any post-storage configuration.
@@ -94,84 +77,14 @@ func instanceCreateAsEmpty(d *Daemon, args db.InstanceArgs) (instance.Instance,
 // created in the database to run any storage layer finalisations, and a revert hook that can be
 // run if the instance database load process fails that will remove anything created thus far.
 func instanceCreateFromBackup(s *state.State, info backup.Info, srcData io.ReadSeeker) (func(instance.Instance) error, func(), error) {
-	// Define hook functions that will be returned to caller.
-	var postHook func(instance.Instance) error
-	var revertHook func()
-
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByName(s, info.Pool)
-
-	supportedInstanceType := false
-	if pool != nil {
-		// No concept of instance type in backups yet, so default to container type.
-		volType, err := storagePools.InstanceTypeToVolumeType(instancetype.Container)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		// We don't have an instance yet so cannot use GetPoolByInstance, so interrogate the driver
-		// directly for instance type support.
-		for _, supportedType := range pool.Driver().Info().VolumeTypes {
-			if supportedType == volType {
-				supportedInstanceType = true
-			}
-		}
+	if err != nil {
+		return nil, nil, err
 	}
 
-	if err != storageDrivers.ErrUnknownDriver && supportedInstanceType {
-		if err != nil {
-			return nil, nil, err
-		}
-
-		postHook, revertHook, err = pool.CreateInstanceFromBackup(info, srcData, nil)
-		if err != nil {
-			return nil, nil, err
-		}
-	} else { // Fallback to old storage layer.
-
-		// Find the compression algorithm.
-		srcData.Seek(0, 0)
-		tarArgs, _, _, err := shared.DetectCompressionFile(srcData)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		pool, err := storagePoolInit(s, info.Pool)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		// Unpack tarball from the source tar stream.
-		srcData.Seek(0, 0)
-		err = pool.ContainerBackupLoad(info, srcData, tarArgs)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		// Update pool information in the backup.yaml file.
-		// Requires the volume and snapshots be mounted from pool.ContainerBackupLoad().
-		mountPath := shared.VarPath("storage-pools", info.Pool, "containers", project.Prefix(info.Project, info.Name))
-		err = backup.UpdateInstanceConfigStoragePool(s.Cluster, info, mountPath)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		// Set revert function to remove the files created so far.
-		revertHook = func() {
-			// Create a temporary container struct (because the container DB record
-			// hasn't been imported yet) for use with storage layer.
-			ctTmp := &containerLXC{name: info.Name, project: info.Project}
-			pool.ContainerDelete(ctTmp)
-		}
-
-		postHook = func(inst instance.Instance) error {
-			_, err = inst.StorageStop()
-			if err != nil {
-				return errors.Wrap(err, "Stop storage pool")
-			}
-
-			return nil
-		}
+	postHook, revertHook, err := pool.CreateInstanceFromBackup(info, srcData, nil)
+	if err != nil {
+		return nil, nil, err
 	}
 
 	return postHook, revertHook, nil
@@ -281,36 +194,14 @@ func instanceCreateFromImage(d *Daemon, args db.InstanceArgs, hash string, op *o
 		return nil, fmt.Errorf("Error updating image last use date: %s", err)
 	}
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(d.State(), inst)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return nil, errors.Wrap(err, "Load instance storage pool")
-		}
-
-		err = pool.CreateInstanceFromImage(inst, hash, op)
-		if err != nil {
-			return nil, errors.Wrap(err, "Create instance from image")
-		}
-	} else if inst.Type() == instancetype.Container {
-		metadata := make(map[string]interface{})
-		var tracker *ioprogress.ProgressTracker
-		if op != nil {
-			tracker = &ioprogress.ProgressTracker{
-				Handler: func(percent, speed int64) {
-					shared.SetProgressMetadata(metadata, "create_instance_from_image_unpack", "Unpack", percent, 0, speed)
-					op.UpdateMetadata(metadata)
-				}}
-		}
+	if err != nil {
+		return nil, errors.Wrap(err, "Load instance storage pool")
+	}
 
-		// Now create the storage from an image.
-		ct := inst.(*containerLXC)
-		err = ct.Storage().ContainerCreateFromImage(inst, hash, tracker)
-		if err != nil {
-			return nil, errors.Wrap(err, "Create instance from image")
-		}
-	} else {
-		return nil, fmt.Errorf("Instance type not supported")
+	err = pool.CreateInstanceFromImage(inst, hash, op)
+	if err != nil {
+		return nil, errors.Wrap(err, "Create instance from image")
 	}
 
 	// Apply any post-storage configuration.
@@ -447,41 +338,21 @@ func instanceCreateAsCopy(s *state.State, args db.InstanceArgs, sourceInst insta
 		}
 	}
 
-	// Check if we can load new storage layer for both target and source pool driver types.
 	pool, err := storagePools.GetPoolByInstance(s, inst)
-	_, srcPoolErr := storagePools.GetPoolByInstance(s, sourceInst)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented && srcPoolErr != storageDrivers.ErrUnknownDriver && srcPoolErr != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return nil, errors.Wrap(err, "Load instance storage pool")
-		}
-
-		if refresh {
-			err = pool.RefreshInstance(inst, sourceInst, snapshots, op)
-			if err != nil {
-				return nil, errors.Wrap(err, "Refresh instance")
-			}
-		} else {
-			err = pool.CreateInstanceFromCopy(inst, sourceInst, !instanceOnly, op)
-			if err != nil {
-				return nil, errors.Wrap(err, "Create instance from copy")
-			}
-		}
-	} else if inst.Type() == instancetype.Container {
-		ct := inst.(*containerLXC)
+	if err != nil {
+		return nil, errors.Wrap(err, "Load instance storage pool")
+	}
 
-		if refresh {
-			err = ct.Storage().ContainerRefresh(inst, sourceInst, snapshots)
-			if err != nil {
-				return nil, err
-			}
-		} else {
-			err = ct.Storage().ContainerCopy(inst, sourceInst, instanceOnly)
-			if err != nil {
-				return nil, err
-			}
+	if refresh {
+		err = pool.RefreshInstance(inst, sourceInst, snapshots, op)
+		if err != nil {
+			return nil, errors.Wrap(err, "Refresh instance")
 		}
 	} else {
-		return nil, fmt.Errorf("Instance type not supported")
+		err = pool.CreateInstanceFromCopy(inst, sourceInst, !instanceOnly, op)
+		if err != nil {
+			return nil, errors.Wrap(err, "Create instance from copy")
+		}
 	}
 
 	// Apply any post-storage configuration.
@@ -569,44 +440,23 @@ func instanceCreateAsSnapshot(s *state.State, args db.InstanceArgs, sourceInstan
 		inst.Delete()
 	}()
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(s, inst)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return nil, err
-		}
-
-		err = pool.CreateInstanceSnapshot(inst, sourceInstance, op)
-		if err != nil {
-			return nil, errors.Wrap(err, "Create instance snapshot")
-		}
-
-		// Mount volume for backup.yaml writing.
-		ourStart, err := pool.MountInstance(sourceInstance, op)
-		if err != nil {
-			return nil, errors.Wrap(err, "Create instance snapshot (mount source)")
-		}
-		if ourStart {
-			defer pool.UnmountInstance(sourceInstance, op)
-		}
-	} else if inst.Type() == instancetype.Container {
-		ct := sourceInstance.(*containerLXC)
-		err = ct.Storage().ContainerSnapshotCreate(inst, sourceInstance)
-		if err != nil {
-			return nil, err
-		}
+	if err != nil {
+		return nil, err
+	}
 
-		// Mount volume for backup.yaml writing.
-		ourStart, err := sourceInstance.StorageStart()
-		if err != nil {
-			return nil, err
-		}
-		if ourStart {
-			defer sourceInstance.StorageStop()
-		}
+	err = pool.CreateInstanceSnapshot(inst, sourceInstance, op)
+	if err != nil {
+		return nil, errors.Wrap(err, "Create instance snapshot")
+	}
 
-	} else {
-		return nil, fmt.Errorf("Instance type not supported")
+	// Mount volume for backup.yaml writing.
+	ourStart, err := pool.MountInstance(sourceInstance, op)
+	if err != nil {
+		return nil, errors.Wrap(err, "Create instance snapshot (mount source)")
+	}
+	if ourStart {
+		defer pool.UnmountInstance(sourceInstance, op)
 	}
 
 	// Attempt to update backup.yaml for instance.
@@ -841,67 +691,29 @@ func instanceCreateInternal(s *state.State, args db.InstanceArgs) (instance.Inst
 
 // instanceConfigureInternal applies quota set in volatile "apply_quota" and writes a backup file.
 func instanceConfigureInternal(state *state.State, c instance.Instance) error {
-	// Find the root device
+	// Find the root device.
 	rootDiskDeviceKey, rootDiskDevice, err := shared.GetRootDiskDevice(c.ExpandedDevices().CloneNative())
 	if err != nil {
 		return err
 	}
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(state, c)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return errors.Wrap(err, "Load instance storage pool")
-		}
+	if err != nil {
+		return errors.Wrap(err, "Load instance storage pool")
+	}
 
-		if rootDiskDevice["size"] != "" {
-			err = pool.SetInstanceQuota(c, rootDiskDevice["size"], nil)
+	if rootDiskDevice["size"] != "" {
+		err = pool.SetInstanceQuota(c, rootDiskDevice["size"], nil)
 
-			// If the storage driver can't set the quota now, store in volatile.
-			if err == storagePools.ErrRunningQuotaResizeNotSupported {
-				err = c.VolatileSet(map[string]string{fmt.Sprintf("volatile.%s.apply_quota", rootDiskDeviceKey): rootDiskDevice["size"]})
-				if err != nil {
-					return err
-				}
-			} else if err != nil {
+		// If the storage driver can't set the quota now, store in volatile.
+		if err == storagePools.ErrRunningQuotaResizeNotSupported {
+			err = c.VolatileSet(map[string]string{fmt.Sprintf("volatile.%s.apply_quota", rootDiskDeviceKey): rootDiskDevice["size"]})
+			if err != nil {
 				return err
 			}
-		}
-	} else if c.Type() == instancetype.Container {
-		ourStart, err := c.StorageStart()
-		if err != nil {
+		} else if err != nil {
 			return err
 		}
-
-		ct := c.(*containerLXC)
-
-		// handle quota: at this point, storage is guaranteed to be ready.
-		storage := ct.Storage()
-		if rootDiskDevice["size"] != "" {
-			storageTypeName := storage.GetStorageTypeName()
-			if (storageTypeName == "lvm" || storageTypeName == "ceph") && c.IsRunning() {
-				err = c.VolatileSet(map[string]string{fmt.Sprintf("volatile.%s.apply_quota", rootDiskDeviceKey): rootDiskDevice["size"]})
-				if err != nil {
-					return err
-				}
-			} else {
-				size, err := units.ParseByteSizeString(rootDiskDevice["size"])
-				if err != nil {
-					return err
-				}
-
-				err = storage.StorageEntitySetQuota(storagePoolVolumeTypeContainer, size, c)
-				if err != nil {
-					return err
-				}
-			}
-		}
-
-		if ourStart {
-			defer c.StorageStop()
-		}
-	} else {
-		return fmt.Errorf("Instance type not supported")
 	}
 
 	err = c.UpdateBackupFile()

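The surviving quota path in instanceConfigureInternal keeps one piece of behaviour from the old code: when the driver cannot resize a running instance, the requested size is parked under volatile.<device>.apply_quota and applied later. A standalone sketch of that defer-on-sentinel pattern (stub types; the real sentinel is storagePools.ErrRunningQuotaResizeNotSupported):

package main

import (
	"errors"
	"fmt"
)

// Stand-in sentinel for storagePools.ErrRunningQuotaResizeNotSupported.
var errRunningResize = errors.New("quota resize not supported while running")

type instance struct {
	running  bool
	volatile map[string]string
}

func (i *instance) setQuota(size string) error {
	if i.running {
		return errRunningResize
	}
	fmt.Println("quota applied:", size)
	return nil
}

// configure applies the root disk quota now if possible, otherwise
// parks it in volatile config to be applied on next start.
func configure(i *instance, devKey, size string) error {
	err := i.setQuota(size)
	if errors.Is(err, errRunningResize) {
		i.volatile[fmt.Sprintf("volatile.%s.apply_quota", devKey)] = size
		return nil
	}
	return err
}

func main() {
	i := &instance{running: true, volatile: map[string]string{}}
	if err := configure(i, "root", "10GB"); err != nil {
		panic(err)
	}
	fmt.Println(i.volatile) // map[volatile.root.apply_quota:10GB]
}

Keying the volatile entry on the device name keeps per-device quotas independent.
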
From c4af78725e20a2aea61cc3ac3270b7b9dcccb870 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 17:27:59 +0000
Subject: [PATCH 22/36] lxd/containers/post: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/containers_post.go | 84 +++++-------------------------------------
 1 file changed, 10 insertions(+), 74 deletions(-)

diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 6e02ea22cf..8e9471d070 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -23,11 +23,9 @@ import (
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
 	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/instance/instancetype"
-	"github.com/lxc/lxd/lxd/migration"
 	"github.com/lxc/lxd/lxd/operations"
 	"github.com/lxc/lxd/lxd/response"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	log "github.com/lxc/lxd/shared/log15"
@@ -265,79 +263,17 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
 	if !req.Source.Refresh {
 		// Check if we can load new storage layer for pool driver type.
 		_, err := storagePools.GetPoolByName(d.State(), storagePool)
-		if err != storageDrivers.ErrUnknownDriver {
-			if err != nil {
-				return response.InternalError(err)
-			}
+		if err != nil {
+			return response.InternalError(err)
+		}
 
-			// Create the instance DB records only and let the storage layer populate
-			// the storage devices. Note: At this stage we do not yet know if snapshots
-			// are going to be received and so we cannot create their DB records. This
-			// will be done if needed in the migrationSink.Do() function called as part
-			// of the operation below.
-			inst, err = instanceCreateInternal(d.State(), args)
-			if err != nil {
-				return response.InternalError(err)
-			}
-		} else {
-			/* Only create a container from an image if we're going to
-			 * rsync over the top of it. In the case of a better file
-			 * transfer mechanism, let's just use that.
-			 *
-			 * TODO: we could invent some negotiation here, where if the
-			 * source and sink both have the same image, we can clone from
-			 * it, but we have to know before sending the snapshot that
-			 * we're sending the whole thing or just a delta from the
-			 * image, so one extra negotiation round trip is needed. An
-			 * alternative is to move actual container object to a later
-			 * point and just negotiate it over the migration control
-			 * socket. Anyway, it'll happen later :)
-			 */
-			_, _, err = d.cluster.ImageGet(args.Project, req.Source.BaseImage, false, true)
-			if err != nil {
-				inst, err = instanceCreateAsEmpty(d, args)
-				if err != nil {
-					return response.InternalError(err)
-				}
-			} else {
-				// Retrieve the future storage pool.
-				tmpInst, err := instance.Load(d.State(), args, nil)
-				if err != nil {
-					return response.InternalError(err)
-				}
-
-				_, rootDiskDevice, err := shared.GetRootDiskDevice(tmpInst.ExpandedDevices().CloneNative())
-				if err != nil {
-					return response.InternalError(err)
-				}
-
-				if rootDiskDevice["pool"] == "" {
-					return response.BadRequest(fmt.Errorf("The container's root device is missing the pool property"))
-				}
-
-				storagePool = rootDiskDevice["pool"]
-
-				var migrationType migration.MigrationFSType
-
-				ps, err := storagePoolInit(d.State(), storagePool)
-				if err != nil {
-					return response.InternalError(err)
-				}
-
-				migrationType = ps.MigrationType()
-
-				if migrationType == migration.MigrationFSType_RSYNC {
-					inst, err = instanceCreateFromImage(d, args, req.Source.BaseImage, nil)
-					if err != nil {
-						return response.InternalError(err)
-					}
-				} else {
-					inst, err = instanceCreateAsEmpty(d, args)
-					if err != nil {
-						return response.InternalError(err)
-					}
-				}
-			}
+		// Create the instance DB records only and let the storage layer populate the storage devices.
+		// Note: At this stage we do not yet know if snapshots are going to be received and so we cannot
+		// create their DB records. This will be done if needed in the migrationSink.Do() function called
+		// as part of the operation below.
+		inst, err = instanceCreateInternal(d.State(), args)
+		if err != nil {
+			return response.InternalError(err)
 		}
 	}
 

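The deleted branch carried the old rsync-versus-image negotiation described in its comment; the new sink always creates the DB record only and lets the storage layer populate volumes during the transfer. A compressed sketch of the resulting flow (illustrative stand-in types, not the real LXD create path):

package main

import "fmt"

type instanceArgs struct{ name string }
type inst struct{ name string }

// Creates the DB record only; the storage layer populates volumes
// during the transfer, once the sink knows which snapshots arrive.
func instanceCreateInternal(args instanceArgs) (*inst, error) {
	return &inst{name: args.name}, nil
}

func createFromMigration(args instanceArgs, refresh bool) (*inst, error) {
	if refresh {
		// Refresh reuses the existing instance record.
		return &inst{name: args.name}, nil
	}
	return instanceCreateInternal(args)
}

func main() {
	i, _ := createFromMigration(instanceArgs{name: "c1"}, false)
	fmt.Println("created DB record for", i.name)
}
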
From 0ed0ab6c22be0f226ede0cafb448a137cb151459 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 17:32:45 +0000
Subject: [PATCH 23/36] lxd/daemon/storage: Consistent comment ending

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/daemon_storage.go | 77 ++++++++++++++++++-------------------------
 1 file changed, 32 insertions(+), 45 deletions(-)

diff --git a/lxd/daemon_storage.go b/lxd/daemon_storage.go
index 522861a9cd..2c3c853cca 100644
--- a/lxd/daemon_storage.go
+++ b/lxd/daemon_storage.go
@@ -37,7 +37,7 @@ func daemonStorageMount(s *state.State) error {
 	}
 
 	mount := func(storageType string, source string) error {
-		// Parse the source
+		// Parse the source.
 		fields := strings.Split(source, "/")
 		if len(fields) != 2 {
 			return fmt.Errorf("Invalid syntax for volume, must be <pool>/<volume>")
@@ -46,7 +46,7 @@ func daemonStorageMount(s *state.State) error {
 		poolName := fields[0]
 		volumeName := fields[1]
 
-		// Mount volume
+		// Mount volume.
 		pool, err := storagePools.GetPoolByName(s, poolName)
 		if err != storageDrivers.ErrUnknownDriver {
 			if err != nil {
@@ -116,7 +116,7 @@ func daemonStorageUsed(s *state.State, poolName string, volumeName string) (bool
 }
 
 func daemonStorageValidate(s *state.State, target string) error {
-	// Check syntax
+	// Check syntax.
 	if target == "" {
 		return nil
 	}
@@ -129,18 +129,18 @@ func daemonStorageValidate(s *state.State, target string) error {
 	poolName := fields[0]
 	volumeName := fields[1]
 
-	// Validate pool exists
+	// Validate pool exists.
 	poolID, dbPool, err := s.Cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return errors.Wrapf(err, "Unable to load storage pool \"%s\"", poolName)
 	}
 
-	// Validate pool driver (can't be CEPH or CEPHFS)
+	// Validate pool driver (can't be CEPH or CEPHFS).
 	if dbPool.Driver == "ceph" || dbPool.Driver == "cephfs" {
 		return fmt.Errorf("Server storage volumes cannot be stored on Ceph")
 	}
 
-	// Confirm volume exists
+	// Confirm volume exists.
 	_, _, err = s.Cluster.StoragePoolNodeVolumeGetType(volumeName, storagePoolVolumeTypeCustom, poolID)
 	if err != nil {
 		return errors.Wrapf(err, "Unable to load storage volume \"%s\"", target)
@@ -185,7 +185,7 @@ func daemonStorageValidate(s *state.State, target string) error {
 		}
 	}
 
-	// Validate volume is empty (ignore lost+found)
+	// Validate volume is empty (ignore lost+found).
 	mountpoint := shared.VarPath("storage-pools", poolName, "custom", volumeName)
 
 	entries, err := ioutil.ReadDir(mountpoint)
@@ -209,7 +209,7 @@ func daemonStorageValidate(s *state.State, target string) error {
 func daemonStorageMove(s *state.State, storageType string, target string) error {
 	destPath := shared.VarPath(storageType)
 
-	// Track down the current storage
+	// Track down the current storage.
 	var sourcePool string
 	var sourceVolume string
 
@@ -223,13 +223,13 @@ func daemonStorageMove(s *state.State, storageType string, target string) error
 	}
 
 	moveContent := func(source string, target string) error {
-		// Copy the content
+		// Copy the content.
 		_, err := rsync.LocalCopy(source, target, "", false)
 		if err != nil {
 			return err
 		}
 
-		// Remove the source content
+		// Remove the source content.
 		entries, err := ioutil.ReadDir(source)
 		if err != nil {
 			return err
@@ -245,26 +245,26 @@ func daemonStorageMove(s *state.State, storageType string, target string) error
 		return nil
 	}
 
-	// Deal with unsetting
+	// Deal with unsetting.
 	if target == "" {
-		// Things already look correct
+		// Things already look correct.
 		if sourcePath == destPath {
 			return nil
 		}
 
-		// Remove the symlink
+		// Remove the symlink.
 		err = os.Remove(destPath)
 		if err != nil {
 			return errors.Wrapf(err, "Failed to delete storage symlink at \"%s\"", destPath)
 		}
 
-		// Re-create as a directory
+		// Re-create as a directory.
 		err = os.MkdirAll(destPath, 0700)
 		if err != nil {
 			return errors.Wrapf(err, "Failed to create directory \"%s\"", destPath)
 		}
 
-		// Move the data across
+		// Move the data across.
 		err = moveContent(sourcePath, destPath)
 		if err != nil {
 			return errors.Wrapf(err, "Failed to move data over to directory \"%s\"", destPath)
@@ -297,7 +297,7 @@ func daemonStorageMove(s *state.State, storageType string, target string) error
 		return nil
 	}
 
-	// Parse the target
+	// Parse the target.
 	fields := strings.Split(target, "/")
 	if len(fields) != 2 {
 		return fmt.Errorf("Invalid syntax for volume, must be <pool>/<volume>")
@@ -307,30 +307,17 @@ func daemonStorageMove(s *state.State, storageType string, target string) error
 	volumeName := fields[1]
 
 	pool, err := storagePools.GetPoolByName(s, poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return err
-		}
-
-		// Mount volume
-		_, err = pool.MountCustomVolume(volumeName, nil)
-		if err != nil {
-			return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", target)
-		}
-	} else {
-		// Mount volume
-		volume, err := storageInit(s, "default", poolName, volumeName, storagePoolVolumeTypeCustom)
-		if err != nil {
-			return errors.Wrapf(err, "Unable to load storage volume \"%s\"", target)
-		}
+	if err != nil {
+		return err
+	}
 
-		_, err = volume.StoragePoolVolumeMount()
-		if err != nil {
-			return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", target)
-		}
+	// Mount volume.
+	_, err = pool.MountCustomVolume(volumeName, nil)
+	if err != nil {
+		return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", target)
 	}
 
-	// Set ownership & mode
+	// Set ownership & mode.
 	mountpoint := shared.VarPath("storage-pools", poolName, "custom", volumeName)
 	destPath = mountpoint
 
@@ -344,21 +331,21 @@ func daemonStorageMove(s *state.State, storageType string, target string) error
 		return errors.Wrapf(err, "Failed to set ownership on \"%s\"", mountpoint)
 	}
 
-	// Handle changes
+	// Handle changes.
 	if sourcePath != shared.VarPath(storageType) {
-		// Remove the symlink
+		// Remove the symlink.
 		err := os.Remove(shared.VarPath(storageType))
 		if err != nil {
 			return errors.Wrapf(err, "Failed to remove the new symlink at \"%s\"", shared.VarPath(storageType))
 		}
 
-		// Create the new symlink
+		// Create the new symlink.
 		err = os.Symlink(destPath, shared.VarPath(storageType))
 		if err != nil {
 			return errors.Wrapf(err, "Failed to create the new symlink at \"%s\"", shared.VarPath(storageType))
 		}
 
-		// Move the data across
+		// Move the data across.
 		err = moveContent(sourcePath, destPath)
 		if err != nil {
 			return errors.Wrapf(err, "Failed to move data over to directory \"%s\"", destPath)
@@ -393,25 +380,25 @@ func daemonStorageMove(s *state.State, storageType string, target string) error
 
 	sourcePath = shared.VarPath(storageType) + ".temp"
 
-	// Rename the existing storage
+	// Rename the existing storage.
 	err = os.Rename(shared.VarPath(storageType), sourcePath)
 	if err != nil {
 		return errors.Wrapf(err, "Failed to rename existing storage \"%s\"", shared.VarPath(storageType))
 	}
 
-	// Create the new symlink
+	// Create the new symlink.
 	err = os.Symlink(destPath, shared.VarPath(storageType))
 	if err != nil {
 		return errors.Wrapf(err, "Failed to create the new symlink at \"%s\"", shared.VarPath(storageType))
 	}
 
-	// Move the data across
+	// Move the data across.
 	err = moveContent(sourcePath, destPath)
 	if err != nil {
 		return errors.Wrapf(err, "Failed to move data over to directory \"%s\"", destPath)
 	}
 
-	// Remove the old data
+	// Remove the old data.
 	err = os.RemoveAll(sourcePath)
 	if err != nil {
 		return errors.Wrapf(err, "Failed to cleanup old directory \"%s\"", sourcePath)


From cc1bf38dab593e505f5e901ddc6604d0646038d2 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 17:33:03 +0000
Subject: [PATCH 24/36] lxd/daemon/storage: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/daemon_storage.go | 115 +++++++++++-------------------------------
 1 file changed, 30 insertions(+), 85 deletions(-)

diff --git a/lxd/daemon_storage.go b/lxd/daemon_storage.go
index 2c3c853cca..52635a5c5b 100644
--- a/lxd/daemon_storage.go
+++ b/lxd/daemon_storage.go
@@ -14,7 +14,6 @@ import (
 	"github.com/lxc/lxd/lxd/rsync"
 	"github.com/lxc/lxd/lxd/state"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -48,25 +47,13 @@ func daemonStorageMount(s *state.State) error {
 
 		// Mount volume.
 		pool, err := storagePools.GetPoolByName(s, poolName)
-		if err != storageDrivers.ErrUnknownDriver {
-			if err != nil {
-				return err
-			}
-
-			_, err = pool.MountCustomVolume(volumeName, nil)
-			if err != nil {
-				return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", source)
-			}
-		} else {
-			volume, err := storageInit(s, "default", poolName, volumeName, storagePoolVolumeTypeCustom)
-			if err != nil {
-				return errors.Wrapf(err, "Unable to load storage volume \"%s\"", source)
-			}
+		if err != nil {
+			return err
+		}
 
-			_, err = volume.StoragePoolVolumeMount()
-			if err != nil {
-				return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", source)
-			}
+		_, err = pool.MountCustomVolume(volumeName, nil)
+		if err != nil {
+			return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", source)
 		}
 
 		return nil
@@ -156,33 +143,17 @@ func daemonStorageValidate(s *state.State, target string) error {
 	}
 
 	pool, err := storagePools.GetPoolByName(s, poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return err
-		}
-
-		// Mount volume
-		ourMount, err := pool.MountCustomVolume(volumeName, nil)
-		if err != nil {
-			return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", target)
-		}
-		if ourMount {
-			defer pool.UnmountCustomVolume(volumeName, nil)
-		}
-	} else {
-		volume, err := storageInit(s, "default", poolName, volumeName, storagePoolVolumeTypeCustom)
-		if err != nil {
-			return errors.Wrapf(err, "Unable to load storage volume \"%s/%s\"", poolName, volumeName)
-		}
+	if err != nil {
+		return err
+	}
 
-		// Mount volume
-		ourMount, err := volume.StoragePoolVolumeMount()
-		if err != nil {
-			return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", target)
-		}
-		if ourMount {
-			defer volume.StoragePoolUmount()
-		}
+	// Mount volume.
+	ourMount, err := pool.MountCustomVolume(volumeName, nil)
+	if err != nil {
+		return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", target)
+	}
+	if ourMount {
+		defer pool.UnmountCustomVolume(volumeName, nil)
 	}
 
 	// Validate volume is empty (ignore lost+found).
@@ -271,27 +242,14 @@ func daemonStorageMove(s *state.State, storageType string, target string) error
 		}
 
 		pool, err := storagePools.GetPoolByName(s, sourcePool)
-		if err != storageDrivers.ErrUnknownDriver {
-			if err != nil {
-				return err
-			}
-
-			// Unmount old volume
-			_, err = pool.UnmountCustomVolume(sourceVolume, nil)
-			if err != nil {
-				return errors.Wrapf(err, "Failed to umount storage volume \"%s/%s\"", sourcePool, sourceVolume)
-			}
-		} else {
-			// Unmount old volume
-			volume, err := storageInit(s, "default", sourcePool, sourceVolume, storagePoolVolumeTypeCustom)
-			if err != nil {
-				return errors.Wrapf(err, "Unable to load storage volume \"%s/%s\"", sourcePool, sourceVolume)
-			}
+		if err != nil {
+			return err
+		}
 
-			_, err = volume.StoragePoolVolumeUmount()
-			if err != nil {
-				return errors.Wrapf(err, "Failed to umount storage volume \"%s/%s\"", sourcePool, sourceVolume)
-			}
+		// Unmount old volume.
+		_, err = pool.UnmountCustomVolume(sourceVolume, nil)
+		if err != nil {
+			return errors.Wrapf(err, "Failed to umount storage volume \"%s/%s\"", sourcePool, sourceVolume)
 		}
 
 		return nil
@@ -352,27 +310,14 @@ func daemonStorageMove(s *state.State, storageType string, target string) error
 		}
 
 		pool, err := storagePools.GetPoolByName(s, sourcePool)
-		if err != storageDrivers.ErrUnknownDriver {
-			if err != nil {
-				return err
-			}
-
-			// Unmount old volume
-			_, err = pool.UnmountCustomVolume(sourceVolume, nil)
-			if err != nil {
-				return errors.Wrapf(err, "Failed to umount storage volume \"%s/%s\"", sourcePool, sourceVolume)
-			}
-		} else {
-			// Unmount old volume
-			volume, err := storageInit(s, "default", sourcePool, sourceVolume, storagePoolVolumeTypeCustom)
-			if err != nil {
-				return errors.Wrapf(err, "Unable to load storage volume \"%s/%s\"", sourcePool, sourceVolume)
-			}
+		if err != nil {
+			return err
+		}
 
-			_, err = volume.StoragePoolVolumeUmount()
-			if err != nil {
-				return errors.Wrapf(err, "Failed to umount storage volume \"%s/%s\"", sourcePool, sourceVolume)
-			}
+		// Unmount old volume.
+		_, err = pool.UnmountCustomVolume(sourceVolume, nil)
+		if err != nil {
+			return errors.Wrapf(err, "Failed to umount storage volume \"%s/%s\"", sourcePool, sourceVolume)
 		}
 
 		return nil

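The validate path keeps the "only unmount what we mounted" idiom: MountCustomVolume reports whether this call performed the mount, and the deferred unmount runs only in that case, so pre-existing mounts are left alone. A standalone sketch of the idiom (stub pool type, not the real storage pool interface):

package main

import "fmt"

type pool struct{ mounted map[string]bool }

// mount returns true only when this call actually performed the mount,
// mirroring the (bool, error) contract of MountCustomVolume.
func (p *pool) mount(vol string) (bool, error) {
	if p.mounted[vol] {
		return false, nil // already mounted elsewhere; not ours to undo
	}
	p.mounted[vol] = true
	return true, nil
}

func (p *pool) unmount(vol string) { delete(p.mounted, vol) }

func validate(p *pool, vol string) error {
	ourMount, err := p.mount(vol)
	if err != nil {
		return err
	}
	if ourMount {
		// Only undo a mount we created.
		defer p.unmount(vol)
	}
	fmt.Println("inspecting volume", vol)
	return nil
}

func main() {
	p := &pool{mounted: map[string]bool{}}
	_ = validate(p, "backups")
}
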
From 6d0ff14c132ef6e6ed32f97ba5e397eb7fd94a86 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 21 Feb 2020 17:35:16 +0000
Subject: [PATCH 25/36] lxd/images: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/images.go | 54 +++++++--------------------------------------------
 1 file changed, 7 insertions(+), 47 deletions(-)

diff --git a/lxd/images.go b/lxd/images.go
index 81b96dc2a4..f4da10d3b1 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -36,7 +36,6 @@ import (
 	"github.com/lxc/lxd/lxd/response"
 	"github.com/lxc/lxd/lxd/state"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/lxd/task"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -628,31 +627,14 @@ func imageCreateInPool(d *Daemon, info *api.Image, storagePool string) error {
 		return fmt.Errorf("No storage pool specified")
 	}
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByName(d.State(), storagePool)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return err
-		}
-
-		err = pool.EnsureImage(info.Fingerprint, nil)
-		if err != nil {
-			return err
-		}
-	} else {
-		// Initialize a new storage interface.
-		s, err := storagePoolInit(d.State(), storagePool)
-		if err != nil {
-			return err
-		}
-
-		// Create the storage volume for the image on the requested storage
-		// pool.
-		err = s.ImageCreate(info.Fingerprint, nil)
-		if err != nil {
-			return err
-		}
+	if err != nil {
+		return err
+	}
 
+	err = pool.EnsureImage(info.Fingerprint, nil)
+	if err != nil {
+		return err
 	}
 
 	return nil
@@ -1385,34 +1367,12 @@ func pruneExpiredImages(ctx context.Context, d *Daemon) error {
 }
 
 func doDeleteImageFromPool(state *state.State, fingerprint string, storagePool string) error {
-	// New storage pool handling.
 	pool, err := storagePools.GetPoolByName(state, storagePool)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return err
-		}
-
-		err = pool.DeleteImage(fingerprint, nil)
-		if err != nil {
-			return err
-		}
-
-		return nil
-	}
-
-	// Initialize a new storage interface.
-	s, err := storagePoolVolumeImageInit(state, storagePool, fingerprint)
 	if err != nil {
 		return err
 	}
 
-	// Delete the storage volume for the image from the storage pool.
-	err = s.ImageDelete(fingerprint)
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return pool.DeleteImage(fingerprint, nil)
 }
 
 func imageDelete(d *Daemon, r *http.Request) response.Response {

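doDeleteImageFromPool now returns the pool call's result directly instead of checking the error and returning nil by hand; imageCreateInPool keeps the explicit check only because it has nothing else to return. A trivial sketch of the idiom (stub type):

package main

import "fmt"

type pool struct{}

func (p *pool) deleteImage(fingerprint string) error {
	fmt.Println("deleting image", fingerprint)
	return nil
}

// Before: err := p.deleteImage(f); if err != nil { return err }; return nil
// After: the call's error is the function's result, so return it directly.
func doDelete(p *pool, f string) error {
	return p.deleteImage(f)
}

func main() {
	_ = doDelete(&pool{}, "abc123")
}
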
From f77accc8a282763ad6bf5b990b96eb1486f35397 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 13:48:55 +0000
Subject: [PATCH 26/36] lxd/migrate/container: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/migrate_container.go | 216 ++++++++++++++-------------------------
 1 file changed, 77 insertions(+), 139 deletions(-)

diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 39ded5915a..68fc01ebcc 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -23,7 +23,6 @@ import (
 	"github.com/lxc/lxd/lxd/rsync"
 	"github.com/lxc/lxd/lxd/state"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -345,52 +344,23 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 
 	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(state, s.instance)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented && err != db.ErrNoSuchObject {
-		if err != nil {
-			return err
-		}
-
-		// The refresh argument passed to MigrationTypes() is always set
-		// to false here. The migration source/sender doesn't need to care whether
-		// or not it's doing a refresh as the migration sink/receiver will know
-		// this, and adjust the migration types accordingly.
-		poolMigrationTypes = pool.MigrationTypes(storagePools.InstanceContentType(s.instance), false)
-		if len(poolMigrationTypes) < 0 {
-			return fmt.Errorf("No source migration types available")
-		}
-
-		// Convert the pool's migration type options to an offer header to target.
-		// Populate the Fs, ZfsFeatures and RsyncFeatures fields.
-		offerHeader = migration.TypesToHeader(poolMigrationTypes...)
-	} else if s.instance.Type() == instancetype.Container {
-		// Fallback to legacy storage layer and populate the Fs, ZfsFeatures and
-		// RsyncFeatures fields.
-
-		// Storage needs to start unconditionally now, since we need to initialize a new
-		// storage interface.
-		ourStart, err := s.instance.StorageStart()
-		if err != nil {
-			return err
-		}
-		if ourStart {
-			defer s.instance.StorageStop()
-		}
+	if err != nil {
+		return err
+	}
 
-		myType := ct.Storage().MigrationType()
-		hasFeature := true
-		offerHeader = migration.MigrationHeader{
-			Fs: &myType,
-			RsyncFeatures: &migration.RsyncFeatures{
-				Xattrs:        &hasFeature,
-				Delete:        &hasFeature,
-				Compress:      &hasFeature,
-				Bidirectional: &hasFeature,
-			},
-		}
-	} else {
-		return fmt.Errorf("Instance type not supported")
+	// The refresh argument passed to MigrationTypes() is always set
+	// to false here. The migration source/sender doesn't need to care whether
+	// or not it's doing a refresh as the migration sink/receiver will know
+	// this, and adjust the migration types accordingly.
+	poolMigrationTypes = pool.MigrationTypes(storagePools.InstanceContentType(s.instance), false)
+	if len(poolMigrationTypes) == 0 {
+		return fmt.Errorf("No source migration types available")
 	}
 
+	// Convert the pool's migration type options to an offer header to target.
+	// Populate the Fs, ZfsFeatures and RsyncFeatures fields.
+	offerHeader = migration.TypesToHeader(poolMigrationTypes...)
+
 	// Add CRIO info to source header.
 	criuType := migration.CRIUType_CRIU_RSYNC.Enum()
 	if !s.live {
@@ -931,114 +901,82 @@ func (c *migrationSink) Do(state *state.State, migrateOp *operations.Operation)
 	var respHeader migration.MigrationHeader
 
 	pool, err := storagePools.GetPoolByInstance(state, c.src.instance)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		if err != nil {
-			return err
-		}
+	if err != nil {
+		return err
+	}
 
-		// Extract the source's migration type and then match it against our pool's
-		// supported types and features. If a match is found the combined features list
-		// will be sent back to requester.
-		respTypes, err := migration.MatchTypes(offerHeader, migration.MigrationFSType_RSYNC, pool.MigrationTypes(storagePools.InstanceContentType(c.src.instance), c.refresh))
-		if err != nil {
-			return err
-		}
+	// Extract the source's migration type and then match it against our pool's
+	// supported types and features. If a match is found the combined features list
+	// will be sent back to requester.
+	respTypes, err := migration.MatchTypes(offerHeader, migration.MigrationFSType_RSYNC, pool.MigrationTypes(storagePools.InstanceContentType(c.src.instance), c.refresh))
+	if err != nil {
+		return err
+	}
 
-		// Convert response type to response header and copy snapshot info into it.
-		respHeader = migration.TypesToHeader(respTypes...)
-		respHeader.SnapshotNames = offerHeader.SnapshotNames
-		respHeader.Snapshots = offerHeader.Snapshots
-		respHeader.Refresh = &c.refresh
-
-		// Translate the legacy MigrationSinkArgs to a VolumeTargetArgs suitable for use
-		// with the new storage layer.
-		myTarget = func(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-			volTargetArgs := migration.VolumeTargetArgs{
-				Name:          args.Instance.Name(),
-				MigrationType: respTypes[0],
-				Refresh:       args.Refresh, // Indicate to receiver volume should exist.
-				TrackProgress: false,        // Do not use a progress tracker on receiver.
-				Live:          args.Live,    // Indicates we will get a final rootfs sync.
-			}
+	// Convert response type to response header and copy snapshot info into it.
+	respHeader = migration.TypesToHeader(respTypes...)
+	respHeader.SnapshotNames = offerHeader.SnapshotNames
+	respHeader.Snapshots = offerHeader.Snapshots
+	respHeader.Refresh = &c.refresh
+
+	// Translate the legacy MigrationSinkArgs to a VolumeTargetArgs suitable for use
+	// with the new storage layer.
+	myTarget = func(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
+		volTargetArgs := migration.VolumeTargetArgs{
+			Name:          args.Instance.Name(),
+			MigrationType: respTypes[0],
+			Refresh:       args.Refresh, // Indicate to receiver volume should exist.
+			TrackProgress: false,        // Do not use a progress tracker on receiver.
+			Live:          args.Live,    // Indicates we will get a final rootfs sync.
+		}
 
-			// At this point we have already figured out the parent container's root
-			// disk device so we can simply retrieve it from the expanded devices.
-			parentStoragePool := ""
-			parentExpandedDevices := args.Instance.ExpandedDevices()
-			parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative())
-			if parentLocalRootDiskDeviceKey != "" {
-				parentStoragePool = parentLocalRootDiskDevice["pool"]
-			}
+		// At this point we have already figured out the parent container's root
+		// disk device so we can simply retrieve it from the expanded devices.
+		parentStoragePool := ""
+		parentExpandedDevices := args.Instance.ExpandedDevices()
+		parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative())
+		if parentLocalRootDiskDeviceKey != "" {
+			parentStoragePool = parentLocalRootDiskDevice["pool"]
+		}
 
-			if parentStoragePool == "" {
-				return fmt.Errorf("Instance's root device is missing the pool property")
-			}
+		if parentStoragePool == "" {
+			return fmt.Errorf("Instance's root device is missing the pool property")
+		}
 
-			// A zero length Snapshots slice indicates volume only migration in
-			// VolumeTargetArgs. So if VolumeOnly was requested, do not populate them.
-			if !args.VolumeOnly {
-				volTargetArgs.Snapshots = make([]string, 0, len(args.Snapshots))
-				for _, snap := range args.Snapshots {
-					volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, *snap.Name)
-					snapArgs := snapshotProtobufToInstanceArgs(args.Instance.Project(), args.Instance.Name(), snap)
-
-					// Ensure that snapshot and parent container have the same
-					// storage pool in their local root disk device. If the root
-					// disk device for the snapshot comes from a profile on the
-					// new instance as well we don't need to do anything.
-					if snapArgs.Devices != nil {
-						snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(snapArgs.Devices.CloneNative())
-						if snapLocalRootDiskDeviceKey != "" {
-							snapArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
-						}
+		// A zero length Snapshots slice indicates volume only migration in
+		// VolumeTargetArgs. So if VolumeOnly was requested, do not populate them.
+		if !args.VolumeOnly {
+			volTargetArgs.Snapshots = make([]string, 0, len(args.Snapshots))
+			for _, snap := range args.Snapshots {
+				volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, *snap.Name)
+				snapArgs := snapshotProtobufToInstanceArgs(args.Instance.Project(), args.Instance.Name(), snap)
+
+				// Ensure that snapshot and parent container have the same
+				// storage pool in their local root disk device. If the root
+				// disk device for the snapshot comes from a profile on the
+				// new instance as well we don't need to do anything.
+				if snapArgs.Devices != nil {
+					snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(snapArgs.Devices.CloneNative())
+					if snapLocalRootDiskDeviceKey != "" {
+						snapArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
 					}
+				}
 
-					// Check if snapshot exists already and if not then create
-					// a new snapshot DB record so that the storage layer can
-					// populate the volume on the storage device.
-					_, err := instance.LoadByProjectAndName(args.Instance.DaemonState(), args.Instance.Project(), snapArgs.Name)
+				// Check if snapshot exists already and if not then create
+				// a new snapshot DB record so that the storage layer can
+				// populate the volume on the storage device.
+				_, err := instance.LoadByProjectAndName(args.Instance.DaemonState(), args.Instance.Project(), snapArgs.Name)
+				if err != nil {
+					// Create the snapshot as it doesn't seem to exist.
+					_, err := instanceCreateInternal(state, snapArgs)
 					if err != nil {
-						// Create the snapshot as it doesn't seem to exist.
-						_, err := instanceCreateInternal(state, snapArgs)
-						if err != nil {
-							return err
-						}
+						return err
 					}
 				}
 			}
-
-			return pool.CreateInstanceFromMigration(args.Instance, &shared.WebsocketIO{Conn: conn}, volTargetArgs, op)
-		}
-	} else if c.src.instance.Type() == instancetype.Container {
-		ct := c.src.instance.(*containerLXC)
-		myTarget = ct.Storage().MigrationSink
-		myType := ct.Storage().MigrationType()
-
-		respHeader = migration.MigrationHeader{
-			Fs:            &myType,
-			Snapshots:     offerHeader.Snapshots,
-			SnapshotNames: offerHeader.SnapshotNames,
-			Refresh:       &c.refresh,
 		}
 
-		// Return those rsync features we know about (with the value sent by the remote).
-		if offerHeader.RsyncFeatures != nil {
-			respHeader.RsyncFeatures = &migration.RsyncFeatures{
-				Xattrs:        offerHeader.RsyncFeatures.Xattrs,
-				Delete:        offerHeader.RsyncFeatures.Delete,
-				Compress:      offerHeader.RsyncFeatures.Compress,
-				Bidirectional: offerHeader.RsyncFeatures.Bidirectional,
-			}
-		}
-
-		// If refresh mode or the storage type the source has doesn't match what we have,
-		// then we have to use rsync.
-		if c.refresh || *offerHeader.Fs != *respHeader.Fs {
-			myTarget = rsyncMigrationSink
-			myType = migration.MigrationFSType_RSYNC
-		}
-	} else {
-		return fmt.Errorf("Instance type not supported")
+		return pool.CreateInstanceFromMigration(args.Instance, &shared.WebsocketIO{Conn: conn}, volTargetArgs, op)
 	}
 
 	// Add CRIU info to response.

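A note on the shape of these changes: every call site in this series previously treated storageDrivers.ErrUnknownDriver from the new pool loader as a signal to fall back to the legacy per-driver storage structs. With all drivers now ported, that fallback branch is deleted and each site collapses to a plain load-or-fail. A minimal sketch of the resulting shape (withPool is a hypothetical helper, not code from the series, and compiles only against the LXD tree):

package main

import (
	"github.com/lxc/lxd/lxd/state"
	storagePools "github.com/lxc/lxd/lxd/storage"
)

// withPool resolves a pool through the new storage layer and hands it
// to fn. Any loader error is fatal now that no legacy fallback exists.
func withPool(s *state.State, poolName string, fn func(storagePools.Pool) error) error {
	pool, err := storagePools.GetPoolByName(s, poolName)
	if err != nil {
		return err
	}

	return fn(pool)
}
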
From 9263ac4f6957e722149177e90f84baa7fe9af56f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 13:50:35 +0000
Subject: [PATCH 27/36] lxd/migrate/storage/volumes: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/migrate_storage_volumes.go | 156 ++++++++++-----------------------
 1 file changed, 45 insertions(+), 111 deletions(-)

diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index ad2c5d15e1..5bc36dd1e7 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -45,52 +45,22 @@ func (s *migrationSourceWs) DoStorage(state *state.State, poolName string, volNa
 
 	// Check if sending storage pool supports new storage layer.
 	pool, err := storagePools.GetPoolByName(state, poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return err
-		}
-
-		// The refresh argument passed to MigrationTypes() is always set
-		// to false here. The migration source/sender doesn't need to care whether
-		// or not it's doing a refresh as the migration sink/receiver will know
-		// this, and adjust the migration types accordingly.
-		poolMigrationTypes = pool.MigrationTypes(storageDrivers.ContentTypeFS, false)
-		if len(poolMigrationTypes) < 0 {
-			return fmt.Errorf("No source migration types available")
-		}
-
-		// Convert the pool's migration type options to an offer header to target.
-		offerHeader = migration.TypesToHeader(poolMigrationTypes...)
-	} else {
-		storage, err := storagePoolVolumeInit(state, "default", poolName, volName, storagePoolVolumeTypeCustom)
-		if err != nil {
-			return err
-		}
-		s.storage = storage
-		myType := s.storage.MigrationType()
-		hasFeature := true
-		offerHeader = migration.MigrationHeader{
-			Fs: &myType,
-			RsyncFeatures: &migration.RsyncFeatures{
-				Xattrs:        &hasFeature,
-				Delete:        &hasFeature,
-				Compress:      &hasFeature,
-				Bidirectional: &hasFeature,
-			},
-		}
+	if err != nil {
+		return err
+	}
 
-		// Storage needs to start unconditionally now, since we need to initialize a new
-		// storage interface.
-		ourMount, err := s.storage.StoragePoolVolumeMount()
-		if err != nil {
-			logger.Errorf("Failed to mount storage volume")
-			return err
-		}
-		if ourMount {
-			defer s.storage.StoragePoolVolumeUmount()
-		}
+	// The refresh argument passed to MigrationTypes() is always set
+	// to false here. The migration source/sender doesn't need to care whether
+	// or not it's doing a refresh as the migration sink/receiver will know
+	// this, and adjust the migration types accordingly.
+	poolMigrationTypes = pool.MigrationTypes(storageDrivers.ContentTypeFS, false)
+	if len(poolMigrationTypes) == 0 {
+		return fmt.Errorf("No source migration types available")
 	}
 
+	// Convert the pool's migration type options to an offer header to target.
+	offerHeader = migration.TypesToHeader(poolMigrationTypes...)
+
 	snapshots := []*migration.Snapshot{}
 	snapshotNames := []string{}
 
@@ -332,80 +302,44 @@ func (c *migrationSink) DoStorage(state *state.State, poolName string, req *api.
 
 	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByName(state, poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return err
-		}
-
-		// Extract the source's migration type and then match it against our pool's
-		// supported types and features. If a match is found the combined features list
-		// will be sent back to requester.
-		respTypes, err := migration.MatchTypes(offerHeader, migration.MigrationFSType_RSYNC, pool.MigrationTypes(storageDrivers.ContentTypeFS, c.refresh))
-		if err != nil {
-			return err
-		}
-
-		// Convert response type to response header and copy snapshot info into it.
-		respHeader = migration.TypesToHeader(respTypes...)
-		respHeader.SnapshotNames = offerHeader.SnapshotNames
-		respHeader.Snapshots = offerHeader.Snapshots
-
-		// Translate the legacy MigrationSinkArgs to a VolumeTargetArgs suitable for use
-		// with the new storage layer.
-		myTarget = func(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-			volTargetArgs := migration.VolumeTargetArgs{
-				Name:          req.Name,
-				Config:        req.Config,
-				Description:   req.Description,
-				MigrationType: respTypes[0],
-				TrackProgress: true,
-			}
+	if err != nil {
+		return err
+	}
 
-			// A zero length Snapshots slice indicates volume only migration in
-			// VolumeTargetArgs. So if VoluneOnly was requested, do not populate them.
-			if !args.VolumeOnly {
-				volTargetArgs.Snapshots = make([]string, 0, len(args.Snapshots))
-				for _, snap := range args.Snapshots {
-					volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, *snap.Name)
-				}
-			}
+	// Extract the source's migration type and then match it against our pool's
+	// supported types and features. If a match is found, the combined features list
+	// will be sent back to the requester.
+	respTypes, err := migration.MatchTypes(offerHeader, migration.MigrationFSType_RSYNC, pool.MigrationTypes(storageDrivers.ContentTypeFS, c.refresh))
+	if err != nil {
+		return err
+	}
 
-			return pool.CreateCustomVolumeFromMigration(&shared.WebsocketIO{Conn: conn}, volTargetArgs, op)
-		}
-	} else {
-		// Setup legacy storage migration sink if destination pool isn't supported yet by
-		// new storage layer.
-		storage, err := storagePoolVolumeDBCreateInternal(state, poolName, req)
-		if err != nil {
-			return err
+	// Convert response type to response header and copy snapshot info into it.
+	respHeader = migration.TypesToHeader(respTypes...)
+	respHeader.SnapshotNames = offerHeader.SnapshotNames
+	respHeader.Snapshots = offerHeader.Snapshots
+
+	// Translate the legacy MigrationSinkArgs to a VolumeTargetArgs suitable for use
+	// with the new storage layer.
+	myTarget = func(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
+		volTargetArgs := migration.VolumeTargetArgs{
+			Name:          req.Name,
+			Config:        req.Config,
+			Description:   req.Description,
+			MigrationType: respTypes[0],
+			TrackProgress: true,
 		}
 
-		// Link the storage variable into the migrationSink (like NewStorageMigrationSink
-		// would have done originally).
-		c.src.storage = storage
-		c.dest.storage = storage
-		myTarget = c.src.storage.StorageMigrationSink
-		myType := c.src.storage.MigrationType()
-
-		hasFeature := true
-		respHeader = migration.MigrationHeader{
-			Fs:            &myType,
-			Snapshots:     offerHeader.Snapshots,
-			SnapshotNames: offerHeader.SnapshotNames,
-			RsyncFeatures: &migration.RsyncFeatures{
-				Xattrs:        &hasFeature,
-				Delete:        &hasFeature,
-				Compress:      &hasFeature,
-				Bidirectional: &hasFeature,
-			},
+		// A zero-length Snapshots slice indicates volume-only migration in
+		// VolumeTargetArgs. So if VolumeOnly was requested, do not populate them.
+		if !args.VolumeOnly {
+			volTargetArgs.Snapshots = make([]string, 0, len(args.Snapshots))
+			for _, snap := range args.Snapshots {
+				volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, *snap.Name)
+			}
 		}
 
-		// If the storage type the source has doesn't match what we have, then we have to
-		// use rsync.
-		if *offerHeader.Fs != *respHeader.Fs {
-			myTarget = rsyncStorageMigrationSink
-			myType = migration.MigrationFSType_RSYNC
-		}
+		return pool.CreateCustomVolumeFromMigration(&shared.WebsocketIO{Conn: conn}, volTargetArgs, op)
 	}
 
 	err = sender(&respHeader)

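For reference, the sink-side negotiation that replaces the hand-built Fs/RsyncFeatures header above reduces to matching the source's offered types against the local pool and echoing the snapshot info back. A rough sketch using the migration package calls from the diff (negotiate is a hypothetical helper):

package main

import (
	"github.com/lxc/lxd/lxd/migration"
	storagePools "github.com/lxc/lxd/lxd/storage"
	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
)

// negotiate intersects the offered migration types with what the local
// pool supports (rsync as fallback) and copies the snapshot info into
// the response header sent back to the source.
func negotiate(pool storagePools.Pool, offer migration.MigrationHeader, refresh bool) (migration.MigrationHeader, error) {
	respTypes, err := migration.MatchTypes(offer, migration.MigrationFSType_RSYNC, pool.MigrationTypes(storageDrivers.ContentTypeFS, refresh))
	if err != nil {
		return migration.MigrationHeader{}, err
	}

	resp := migration.TypesToHeader(respTypes...)
	resp.SnapshotNames = offer.SnapshotNames
	resp.Snapshots = offer.Snapshots

	return resp, nil
}
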
From 4b604e81306c1ec7df4006cd7126b4b7b8db7599 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 13:52:08 +0000
Subject: [PATCH 28/36] lxd/resources: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/resources.go | 30 ++++++------------------------
 1 file changed, 6 insertions(+), 24 deletions(-)

diff --git a/lxd/resources.go b/lxd/resources.go
index 88852a9a13..3306655b3e 100644
--- a/lxd/resources.go
+++ b/lxd/resources.go
@@ -8,7 +8,6 @@ import (
 	"github.com/lxc/lxd/lxd/resources"
 	"github.com/lxc/lxd/lxd/response"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared/api"
 )
 
@@ -57,30 +56,13 @@ func storagePoolResourcesGet(d *Daemon, r *http.Request) response.Response {
 
 	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByName(d.State(), poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return response.InternalError(err)
-		}
-
-		res, err = pool.GetResources()
-		if err != nil {
-			return response.InternalError(err)
-		}
-	} else { // Fallback to old storage layer.
-		s, err := storagePoolInit(d.State(), poolName)
-		if err != nil {
-			return response.InternalError(err)
-		}
-
-		err = s.StoragePoolCheck()
-		if err != nil {
-			return response.InternalError(err)
-		}
+	if err != nil {
+		return response.InternalError(err)
+	}
 
-		res, err = s.StoragePoolResources()
-		if err != nil {
-			return response.InternalError(err)
-		}
+	res, err = pool.GetResources()
+	if err != nil {
+		return response.InternalError(err)
 	}
 
 	return response.SyncResponse(true, res)

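What's left of the handler is a straight resolve-then-query. Sketched as a free function for illustration (poolResources is hypothetical; the real handler also extracts the pool name from the request URL):

package main

import (
	"github.com/lxc/lxd/lxd/response"
	"github.com/lxc/lxd/lxd/state"
	storagePools "github.com/lxc/lxd/lxd/storage"
)

// poolResources loads the pool via the new storage layer and wraps its
// resource usage in a standard sync response.
func poolResources(s *state.State, poolName string) response.Response {
	pool, err := storagePools.GetPoolByName(s, poolName)
	if err != nil {
		return response.InternalError(err)
	}

	res, err := pool.GetResources()
	if err != nil {
		return response.InternalError(err)
	}

	return response.SyncResponse(true, res)
}
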
From a59b24f62bfb9c247b419215c130440958f41375 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 13:54:42 +0000
Subject: [PATCH 29/36] lxd/storage/pools/utils: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_pools_utils.go | 136 +++++--------------------------------
 1 file changed, 17 insertions(+), 119 deletions(-)

diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 500ac48b20..c8721f0c16 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -2,92 +2,23 @@ package main
 
 import (
 	"fmt"
-	"os"
 	"strings"
 
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/version"
 )
 
 func storagePoolUpdate(state *state.State, name, newDescription string, newConfig map[string]string, withDB bool) error {
-	// Handle the new logic
 	pool, err := storagePools.GetPoolByName(state, name)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return err
-		}
-
-		return pool.Update(!withDB, newDescription, newConfig, nil)
-	}
-
-	// Old logic
-	s, err := storagePoolInit(state, name)
-	if err != nil {
-		return err
-	}
-
-	oldWritable := s.GetStoragePoolWritable()
-	newWritable := oldWritable
-
-	// Backup the current state
-	oldDescription := oldWritable.Description
-	oldConfig := map[string]string{}
-	err = shared.DeepCopy(&oldWritable.Config, &oldConfig)
 	if err != nil {
 		return err
 	}
 
-	// Define a function which reverts everything.  Defer this function
-	// so that it doesn't need to be explicitly called in every failing
-	// return path. Track whether or not we want to undo the changes
-	// using a closure.
-	undoChanges := true
-	defer func() {
-		if undoChanges {
-			s.SetStoragePoolWritable(&oldWritable)
-		}
-	}()
-
-	changedConfig, userOnly := storagePools.ConfigDiff(oldConfig, newConfig)
-	// Apply config changes if there are any
-	if len(changedConfig) != 0 {
-		newWritable.Description = newDescription
-		newWritable.Config = newConfig
-
-		// Update the storage pool
-		if !userOnly {
-			if shared.StringInSlice("driver", changedConfig) {
-				return fmt.Errorf("the \"driver\" property of a storage pool cannot be changed")
-			}
-
-			err = s.StoragePoolUpdate(&newWritable, changedConfig)
-			if err != nil {
-				return err
-			}
-		}
-
-		// Apply the new configuration
-		s.SetStoragePoolWritable(&newWritable)
-	}
-
-	// Update the database if something changed and the withDB flag is true
-	// (i.e. this is not a clustering notification.
-	if withDB && (len(changedConfig) != 0 || newDescription != oldDescription) {
-		err = state.Cluster.StoragePoolUpdate(name, newDescription, newConfig)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Success, update the closure to mark that the changes should be kept.
-	undoChanges = false
-
-	return nil
+	return pool.Update(!withDB, newDescription, newConfig, nil)
 }
 
 // Report all LXD objects that are currently using the given storage pool.
@@ -265,61 +196,28 @@ func storagePoolCreateLocal(state *state.State, id int64, req api.StoragePoolsPo
 	var updatedReq api.StoragePoolsPost
 	shared.DeepCopy(&req, &updatedReq)
 
-	// Attempt to create using the new storage pool logic.
 	pool, err := storagePools.CreatePool(state, id, &updatedReq, isNotification, nil)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return nil, err
-		}
-
-		// Mount the pool
-		_, err = pool.Mount()
-		if err != nil {
-			return nil, err
-		}
-
-		// Record the updated config.
-		updatedConfig = updatedReq.Config
-
-		// Setup revert function.
-		defer func() {
-			if !tryUndo {
-				return
-			}
+	if err != nil {
+		return nil, err
+	}
 
-			pool.Delete(isNotification, nil)
-		}()
-	} else {
-		// Load the old storage struct
-		s, err := storagePoolInit(state, req.Name)
-		if err != nil {
-			return nil, err
-		}
+	// Mount the pool.
+	_, err = pool.Mount()
+	if err != nil {
+		return nil, err
+	}
 
-		// If this is a clustering notification for a ceph storage, we don't
-		// want this node to actually create the pool, as it's already been
-		// done by the node that triggered this notification. We just need to
-		// create the storage pool directory.
-		if s, ok := s.(*storageCeph); ok && isNotification {
-			volumeMntPoint := storagePools.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-			return nil, os.MkdirAll(volumeMntPoint, 0711)
-		}
+	// Record the updated config.
+	updatedConfig = updatedReq.Config
 
-		// Create the pool
-		err = s.StoragePoolCreate()
-		if err != nil {
-			return nil, err
+	// Setup revert function.
+	defer func() {
+		if !tryUndo {
+			return
 		}
 
-		updatedConfig = s.GetStoragePoolWritable().Config
-
-		defer func() {
-			if !tryUndo {
-				return
-			}
-			s.StoragePoolDelete()
-		}()
-	}
+		pool.Delete(isNotification, nil)
+	}()
 
 	// In case the storage pool config was changed during the pool creation,
 	// we need to update the database to reflect this change. This can e.g.

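The create path keeps its undo discipline without the legacy branch: a deferred Delete() guarded by a flag that is cleared only once everything has succeeded. A sketch of the pattern, assuming the CreatePool()/Mount()/Delete() signatures visible in the diff (createPoolWithRevert and finish are illustrative names):

package main

import (
	"github.com/lxc/lxd/lxd/state"
	storagePools "github.com/lxc/lxd/lxd/storage"
	"github.com/lxc/lxd/shared/api"
)

// createPoolWithRevert creates and mounts a pool, then defers a delete
// that only fires if a later step fails before tryUndo is cleared.
func createPoolWithRevert(s *state.State, id int64, req *api.StoragePoolsPost, isNotification bool, finish func() error) error {
	pool, err := storagePools.CreatePool(s, id, req, isNotification, nil)
	if err != nil {
		return err
	}

	_, err = pool.Mount()
	if err != nil {
		return err
	}

	tryUndo := true
	defer func() {
		if tryUndo {
			pool.Delete(isNotification, nil)
		}
	}()

	// Remaining setup (e.g. recording the updated config) happens here.
	err = finish()
	if err != nil {
		return err
	}

	tryUndo = false // Success, keep the pool.
	return nil
}
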
From b6aea8c1b75b34716c47473e7655683b71fe8e5f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 13:56:13 +0000
Subject: [PATCH 30/36] lxd/storage/pools: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_pools.go | 60 +++++++-------------------------------------
 1 file changed, 9 insertions(+), 51 deletions(-)

diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index e0ed97f00c..327e8ca2de 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"os"
 	"strings"
 	"sync"
 
@@ -15,7 +14,6 @@ import (
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/response"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -568,52 +566,12 @@ func storagePoolDelete(d *Daemon, r *http.Request) response.Response {
 	}
 
 	pool, err := storagePools.GetPoolByName(d.State(), poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return response.InternalError(err)
-		}
-
-		// Only delete images if locally stored or running on initial member.
-		if !isClusterNotification(r) || !pool.Driver().Info().Remote {
-			for _, volume := range volumeNames {
-				_, imgInfo, err := d.cluster.ImageGet(projectParam(r), volume, false, false)
-				if err != nil {
-					return response.InternalError(err)
-				}
-
-				err = doDeleteImageFromPool(d.State(), imgInfo.Fingerprint, poolName)
-				if err != nil {
-					return response.InternalError(err)
-				}
-			}
-		}
-
-		err = pool.Delete(isClusterNotification(r), nil)
-		if err != nil {
-			return response.InternalError(err)
-		}
-	} else {
-		s, err := storagePoolInit(d.State(), poolName)
-		if err != nil {
-			return response.InternalError(err)
-		}
-
-		// If this is a notification for a ceph pool deletion, we don't want to
-		// actually delete the pool, since that will be done by the node that
-		// notified us. We just need to delete the local mountpoint.
-		if s, ok := s.(*storageCeph); ok && isClusterNotification(r) {
-			// Delete the mountpoint for the storage pool.
-			poolMntPoint := storagePools.GetStoragePoolMountPoint(s.pool.Name)
-			if shared.PathExists(poolMntPoint) {
-				err := os.RemoveAll(poolMntPoint)
-				if err != nil {
-					return response.SmartError(err)
-				}
-			}
-
-			return response.EmptySyncResponse
-		}
+	if err != nil {
+		return response.InternalError(err)
+	}
 
+	// Only delete images if locally stored or running on initial member.
+	if !isClusterNotification(r) || !pool.Driver().Info().Remote {
 		for _, volume := range volumeNames {
 			_, imgInfo, err := d.cluster.ImageGet(projectParam(r), volume, false, false)
 			if err != nil {
@@ -625,11 +583,11 @@ func storagePoolDelete(d *Daemon, r *http.Request) response.Response {
 				return response.InternalError(err)
 			}
 		}
+	}
 
-		err = s.StoragePoolDelete()
-		if err != nil {
-			return response.InternalError(err)
-		}
+	err = pool.Delete(isClusterNotification(r), nil)
+	if err != nil {
+		return response.InternalError(err)
 	}
 
 	// If this is a cluster notification, we're done, any database work

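Cluster behaviour now comes from the driver info rather than a type assertion on *storageCeph: on a remote pool, a cluster notification skips the image deletion because the member that initiated the deletion already handled it. A minimal sketch, with doDeleteImage standing in for the image-deletion call used above:

package main

import (
	storagePools "github.com/lxc/lxd/lxd/storage"
)

// deletePoolAndImages removes the pool's image volumes when they are
// stored locally or when this member initiated the deletion, then asks
// the pool to delete itself, telling it whether this is only a
// cluster notification.
func deletePoolAndImages(pool storagePools.Pool, clusterNotification bool, fingerprints []string, doDeleteImage func(fingerprint string) error) error {
	if !clusterNotification || !pool.Driver().Info().Remote {
		for _, fp := range fingerprints {
			err := doDeleteImage(fp)
			if err != nil {
				return err
			}
		}
	}

	return pool.Delete(clusterNotification, nil)
}
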
From 6d1e17863bfb21f24f488dc72b6fac878753a046 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 13:58:35 +0000
Subject: [PATCH 31/36] lxd/storage/volumes/snapshot: Removes old storage
 loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_volumes_snapshot.go | 113 ++++----------------------------
 1 file changed, 13 insertions(+), 100 deletions(-)

diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index 7ba418592e..fc40abb748 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -12,7 +12,6 @@ import (
 	"github.com/lxc/lxd/lxd/operations"
 	"github.com/lxc/lxd/lxd/response"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -112,57 +111,12 @@ func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) response.Res
 	}
 
 	snapshot := func(op *operations.Operation) error {
-		// Check if we can load new storage layer for pool driver type.
 		pool, err := storagePools.GetPoolByName(d.State(), poolName)
-		if err != storageDrivers.ErrUnknownDriver {
-			if err != nil {
-				return err
-			}
-
-			err = pool.CreateCustomVolumeSnapshot(volumeName, req.Name, op)
-			if err != nil {
-				return err
-			}
-		} else {
-			// Ensure that the storage volume exists.
-			storage, err := storagePoolVolumeInit(d.State(), "default", poolName, volumeName, volumeType)
-			if err != nil {
-				return err
-			}
-
-			// Start the storage.
-			ourMount, err := storage.StoragePoolVolumeMount()
-			if err != nil {
-				return err
-			}
-			if ourMount {
-				defer storage.StoragePoolVolumeUmount()
-			}
-
-			volWritable := storage.GetStoragePoolVolumeWritable()
-			fullSnapName := fmt.Sprintf("%s%s%s", volumeName, shared.SnapshotDelimiter, req.Name)
-			req.Name = fullSnapName
-			dbArgs := &db.StorageVolumeArgs{
-				Name:        fullSnapName,
-				PoolName:    poolName,
-				TypeName:    volumeTypeName,
-				Snapshot:    true,
-				Config:      volWritable.Config,
-				Description: volWritable.Description,
-			}
-
-			err = storage.StoragePoolVolumeSnapshotCreate(&req)
-			if err != nil {
-				return err
-			}
-
-			_, err = storagePoolVolumeSnapshotDBCreateInternal(d.State(), dbArgs)
-			if err != nil {
-				return err
-			}
+		if err != nil {
+			return err
 		}
 
-		return nil
+		return pool.CreateCustomVolumeSnapshot(volumeName, req.Name, op)
 	}
 
 	resources := map[string][]string{}
@@ -310,25 +264,12 @@ func storagePoolVolumeSnapshotTypePost(d *Daemon, r *http.Request) response.Resp
 	}
 
 	snapshotRename := func(op *operations.Operation) error {
-		// Check if we can load new storage layer for pool driver type.
 		pool, err := storagePools.GetPoolByName(d.State(), poolName)
-		if err != storageDrivers.ErrUnknownDriver {
-			if err != nil {
-				return err
-			}
-
-			err = pool.RenameCustomVolumeSnapshot(fullSnapshotName, req.Name, op)
-		} else {
-			var s storage
-			s, err = storagePoolVolumeInit(d.State(), "default", poolName, fullSnapshotName, volumeType)
-			if err != nil {
-				return err
-			}
-
-			err = s.StoragePoolVolumeSnapshotRename(req.Name)
+		if err != nil {
+			return err
 		}
 
-		return err
+		return pool.RenameCustomVolumeSnapshot(fullSnapshotName, req.Name, op)
 	}
 
 	resources := map[string][]string{}
@@ -459,29 +400,13 @@ func storagePoolVolumeSnapshotTypePut(d *Daemon, r *http.Request) response.Respo
 	}
 
 	do := func(op *operations.Operation) error {
-		// Check if we can load new storage layer for pool driver type.
 		pool, err := storagePools.GetPoolByName(d.State(), poolName)
-		if err != storageDrivers.ErrUnknownDriver {
-			if err != nil {
-				return err
-			}
-
-			// Handle custom volume update requests.
-			err = pool.UpdateCustomVolumeSnapshot(vol.Name, req.Description, nil, op)
-			if err != nil {
-				return err
-			}
-		} else {
-			// Update the database if description changed. Use current config.
-			if req.Description != vol.Description {
-				err = d.cluster.StoragePoolVolumeUpdateByProject("default", vol.Name, volumeType, poolID, req.Description, vol.Config)
-				if err != nil {
-					return err
-				}
-			}
+		if err != nil {
+			return err
 		}
 
-		return nil
+		// Handle custom volume update requests.
+		return pool.UpdateCustomVolumeSnapshot(vol.Name, req.Description, nil, op)
 	}
 
 	resources := map[string][]string{}
@@ -539,23 +464,11 @@ func storagePoolVolumeSnapshotTypeDelete(d *Daemon, r *http.Request) response.Re
 	snapshotDelete := func(op *operations.Operation) error {
 		// Check if we can load new storage layer for pool driver type.
 		pool, err := storagePools.GetPoolByName(d.State(), poolName)
-		if err != storageDrivers.ErrUnknownDriver {
-			if err != nil {
-				return err
-			}
-
-			err = pool.DeleteCustomVolumeSnapshot(fullSnapshotName, op)
-		} else {
-			var s storage
-			s, err = storagePoolVolumeInit(d.State(), "default", poolName, fullSnapshotName, volumeType)
-			if err != nil {
-				return err
-			}
-
-			err = s.StoragePoolVolumeSnapshotDelete()
+		if err != nil {
+			return err
 		}
 
-		return err
+		return pool.DeleteCustomVolumeSnapshot(fullSnapshotName, op)
 	}
 
 	resources := map[string][]string{}

From afb027fac5d6e4a1042152fb2f47020a7c45440f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 14:08:41 +0000
Subject: [PATCH 32/36] lxd/storage/volumes: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_volumes.go | 374 ++++++++++-------------------------------
 1 file changed, 92 insertions(+), 282 deletions(-)

diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 6748d3f2b7..6cd6bcf2fc 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -17,7 +17,6 @@ import (
 	"github.com/lxc/lxd/lxd/response"
 	"github.com/lxc/lxd/lxd/state"
 	storagePools "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -311,25 +310,17 @@ func storagePoolVolumesTypePost(d *Daemon, r *http.Request) response.Response {
 func doVolumeCreateOrCopy(d *Daemon, poolName string, req *api.StorageVolumesPost) response.Response {
 	var run func(op *operations.Operation) error
 
-	// Check if we can load new storage layer for both target and source pool driver types.
 	pool, err := storagePools.GetPoolByName(d.State(), poolName)
-	_, srcPoolErr := storagePools.GetPoolByName(d.State(), req.Source.Pool)
-	if err != storageDrivers.ErrUnknownDriver && srcPoolErr != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return response.SmartError(err)
-		}
-
-		run = func(op *operations.Operation) error {
-			if req.Source.Name == "" {
-				return pool.CreateCustomVolume(req.Name, req.Description, req.Config, op)
-			}
+	if err != nil {
+		return response.SmartError(err)
+	}
 
-			return pool.CreateCustomVolumeFromCopy(req.Name, req.Description, req.Config, req.Source.Pool, req.Source.Name, req.Source.VolumeOnly, op)
-		}
-	} else {
-		run = func(op *operations.Operation) error {
-			return storagePoolVolumeCreateInternal(d.State(), poolName, req)
+	run = func(op *operations.Operation) error {
+		if req.Source.Name == "" {
+			return pool.CreateCustomVolume(req.Name, req.Description, req.Config, op)
 		}
+
+		return pool.CreateCustomVolumeFromCopy(req.Name, req.Description, req.Config, req.Source.Pool, req.Source.Name, req.Source.VolumeOnly, op)
 	}
 
 	// If no source name supplied then this a volume create operation.
@@ -661,31 +652,16 @@ func storagePoolVolumeTypePostRename(d *Daemon, poolName string, volumeName stri
 		return response.SmartError(err)
 	}
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByName(d.State(), poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return response.SmartError(err)
-		}
-
-		err = pool.RenameCustomVolume(volumeName, req.Name, nil)
-		if err != nil {
-			// Notify users of the volume that it's name is changing back.
-			storagePoolVolumeUpdateUsers(d, req.Pool, req.Name, poolName, volumeName)
-			return response.SmartError(err)
-		}
-	} else {
-		s, err := storagePoolVolumeInit(d.State(), "default", poolName, volumeName, volumeType)
-		if err != nil {
-			return response.InternalError(err)
-		}
+	if err != nil {
+		return response.SmartError(err)
+	}
 
-		err = s.StoragePoolVolumeRename(req.Name)
-		if err != nil {
-			// Notify users of the volume that it's name is changing back.
-			storagePoolVolumeUpdateUsers(d, req.Pool, req.Name, poolName, volumeName)
-			return response.SmartError(err)
-		}
+	err = pool.RenameCustomVolume(volumeName, req.Name, nil)
+	if err != nil {
+		// Notify users of the volume that its name is changing back.
+		storagePoolVolumeUpdateUsers(d, req.Pool, req.Name, poolName, volumeName)
+		return response.SmartError(err)
 	}
 
 	return response.SyncResponseLocation(true, nil, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s", version.APIVersion, poolName, storagePoolVolumeAPIEndpointCustom))
@@ -695,98 +671,33 @@ func storagePoolVolumeTypePostRename(d *Daemon, poolName string, volumeName stri
 func storagePoolVolumeTypePostMove(d *Daemon, poolName string, volumeName string, volumeType int, req api.StorageVolumePost) response.Response {
 	var run func(op *operations.Operation) error
 
-	// Check if we can load new storage layer for both target and source pool driver types.
-	srcPool, srcPoolErr := storagePools.GetPoolByName(d.State(), poolName)
-	pool, err := storagePools.GetPoolByName(d.State(), req.Pool)
-	if err != storageDrivers.ErrUnknownDriver && srcPoolErr != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return response.SmartError(err)
-		}
-
-		run = func(op *operations.Operation) error {
-			// Notify users of the volume that it's name is changing.
-			err := storagePoolVolumeUpdateUsers(d, poolName, volumeName, req.Pool, req.Name)
-			if err != nil {
-				return err
-			}
-
-			// Provide empty description and nil config to instruct
-			// CreateCustomVolumeFromCopy to copy it from source volume.
-			err = pool.CreateCustomVolumeFromCopy(req.Name, "", nil, poolName, volumeName, false, op)
-			if err != nil {
-				// Notify users of the volume that it's name is changing back.
-				storagePoolVolumeUpdateUsers(d, req.Pool, req.Name, poolName, volumeName)
-				return err
-			}
+	srcPool, err := storagePools.GetPoolByName(d.State(), poolName)
+	if err != nil {
+		return response.SmartError(err)
+	}
 
-			return srcPool.DeleteCustomVolume(volumeName, op)
-		}
-	} else {
-		// Convert poolName to poolID.
-		poolID, _, err := d.cluster.StoragePoolGet(poolName)
-		if err != nil {
-			return response.SmartError(err)
-		}
+	pool, err := storagePools.GetPoolByName(d.State(), req.Pool)
+	if err != nil {
+		return response.SmartError(err)
+	}
 
-		// Get the storage volume.
-		_, volume, err := d.cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
+	run = func(op *operations.Operation) error {
+		// Notify users of the volume that its name is changing.
+		err := storagePoolVolumeUpdateUsers(d, poolName, volumeName, req.Pool, req.Name)
 		if err != nil {
-			return response.SmartError(err)
+			return err
 		}
 
-		// Get storage volume snapshots.
-		snapshots, err := d.cluster.StoragePoolVolumeSnapshotsGetType(volumeName, volumeType, poolID)
+		// Provide empty description and nil config to instruct
+		// CreateCustomVolumeFromCopy to copy it from source volume.
+		err = pool.CreateCustomVolumeFromCopy(req.Name, "", nil, poolName, volumeName, false, op)
 		if err != nil {
-			return response.SmartError(err)
+			// Notify users of the volume that its name is changing back.
+			storagePoolVolumeUpdateUsers(d, req.Pool, req.Name, poolName, volumeName)
+			return err
 		}
 
-		// This is a move request, so copy the volume and then delete the original.
-		moveReq := api.StorageVolumesPost{}
-		moveReq.Name = req.Name
-		moveReq.Type = "custom"
-		moveReq.Config = volume.Config
-		moveReq.Source.Name = volumeName
-		moveReq.Source.Pool = poolName
-
-		run = func(op *operations.Operation) error {
-			// Notify users of the volume that it's name is changing.
-			err := storagePoolVolumeUpdateUsers(d, poolName, volumeName, req.Pool, req.Name)
-			if err != nil {
-				return err
-			}
-
-			err = storagePoolVolumeCreateInternal(d.State(), req.Pool, &moveReq)
-			if err != nil {
-				// Notify users of the volume that it's name is changing back.
-				storagePoolVolumeUpdateUsers(d, req.Pool, req.Name, poolName, volumeName)
-				return err
-			}
-
-			// Delete snapshot volumes.
-			for _, snapshot := range snapshots {
-				s, err := storagePoolVolumeInit(d.State(), "default", poolName, snapshot.Name, volumeType)
-				if err != nil {
-					return err
-				}
-
-				err = s.StoragePoolVolumeSnapshotDelete()
-				if err != nil {
-					return err
-				}
-			}
-
-			s, err := storagePoolVolumeInit(d.State(), "default", poolName, volumeName, volumeType)
-			if err != nil {
-				return err
-			}
-
-			err = s.StoragePoolVolumeDelete()
-			if err != nil {
-				return err
-			}
-
-			return nil
-		}
+		return srcPool.DeleteCustomVolume(volumeName, op)
 	}
 
 	op, err := operations.OperationCreate(d.State(), "", operations.OperationClassTask, db.OperationVolumeMove, nil, nil, run, nil, nil)
@@ -932,7 +843,7 @@ func storagePoolVolumeTypePut(d *Daemon, r *http.Request, volumeTypeName string)
 		return response.BadRequest(fmt.Errorf("Invalid storage volume type %s", volumeTypeName))
 	}
 
-	poolID, poolRow, err := d.cluster.StoragePoolGet(poolName)
+	pool, err := storagePools.GetPoolByName(d.State(), poolName)
 	if err != nil {
 		return response.SmartError(err)
 	}
@@ -942,13 +853,13 @@ func storagePoolVolumeTypePut(d *Daemon, r *http.Request, volumeTypeName string)
 		return resp
 	}
 
-	resp = ForwardedResponseIfVolumeIsRemote(d, r, poolID, volumeName, volumeType)
+	resp = ForwardedResponseIfVolumeIsRemote(d, r, pool.ID(), volumeName, volumeType)
 	if resp != nil {
 		return resp
 	}
 
 	// Get the existing storage volume.
-	_, vol, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, poolID)
+	_, vol, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, pool.ID())
 	if err != nil {
 		return response.SmartError(err)
 	}
@@ -966,89 +877,53 @@ func storagePoolVolumeTypePut(d *Daemon, r *http.Request, volumeTypeName string)
 		return response.BadRequest(err)
 	}
 
-	// Check if we can load new storage layer for pool driver type.
-	pool, err := storagePools.GetPoolByName(d.State(), poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return response.SmartError(err)
-		}
-
-		if volumeType == db.StoragePoolVolumeTypeCustom {
-			// Restore custom volume from snapshot if requested. This should occur first
-			// before applying config changes so that changes are applied to the
-			// restored volume.
-			if req.Restore != "" {
-				err = pool.RestoreCustomVolume(vol.Name, req.Restore, nil)
-				if err != nil {
-					return response.SmartError(err)
-				}
-			}
-
-			// Handle custom volume update requests.
-			err = pool.UpdateCustomVolume(vol.Name, req.Description, req.Config, nil)
-			if err != nil {
-				return response.SmartError(err)
-			}
-		} else if volumeType == db.StoragePoolVolumeTypeContainer || volumeType == db.StoragePoolVolumeTypeVM {
-			inst, err := instance.LoadByProjectAndName(d.State(), project, vol.Name)
-			if err != nil {
-				return response.NotFound(err)
-			}
-
-			// There is a bug in the lxc client (lxc/storage_volume.go#L829-L865) which
-			// means that modifying an instance snapshot's description gets routed here
-			// rather than the dedicated snapshot editing route. So need to handle
-			// snapshot volumes here too.
-			if inst.IsSnapshot() {
-				// Handle instance snapshot volume update requests.
-				err = pool.UpdateInstanceSnapshot(inst, req.Description, req.Config, nil)
-				if err != nil {
-					return response.SmartError(err)
-				}
-			} else {
-				// Handle instance volume update requests.
-				err = pool.UpdateInstance(inst, req.Description, req.Config, nil)
-				if err != nil {
-					return response.SmartError(err)
-				}
-			}
-		} else if volumeType == db.StoragePoolVolumeTypeImage {
-			// Handle image update requests.
-			err = pool.UpdateImage(vol.Name, req.Description, req.Config, nil)
+	if volumeType == db.StoragePoolVolumeTypeCustom {
+		// Restore custom volume from snapshot if requested. This should occur
+		// before applying config changes so that they are applied to the
+		// restored volume.
+		if req.Restore != "" {
+			err = pool.RestoreCustomVolume(vol.Name, req.Restore, nil)
 			if err != nil {
 				return response.SmartError(err)
 			}
-		} else {
-			return response.SmartError(fmt.Errorf("Invalid volume type"))
 		}
-	} else {
 
-		if req.Restore != "" {
-			ctsUsingVolume, err := storagePoolVolumeUsedByRunningInstancesWithProfilesGet(d.State(), poolName, vol.Name, storagePoolVolumeTypeNameCustom, true)
-			if err != nil {
-				return response.InternalError(err)
-			}
-
-			if len(ctsUsingVolume) != 0 {
-				return response.BadRequest(fmt.Errorf("Cannot restore custom volume used by running containers"))
-			}
+		// Handle custom volume update requests.
+		err = pool.UpdateCustomVolume(vol.Name, req.Description, req.Config, nil)
+		if err != nil {
+			return response.SmartError(err)
+		}
+	} else if volumeType == db.StoragePoolVolumeTypeContainer || volumeType == db.StoragePoolVolumeTypeVM {
+		inst, err := instance.LoadByProjectAndName(d.State(), project, vol.Name)
+		if err != nil {
+			return response.NotFound(err)
+		}
 
-			err = storagePoolVolumeRestore(d.State(), poolName, volumeName, volumeType, req.Restore)
+		// There is a bug in the lxc client (lxc/storage_volume.go#L829-L865) which
+		// means that modifying an instance snapshot's description gets routed here
+		// rather than the dedicated snapshot editing route. So we need to handle
+		// snapshot volumes here too.
+		if inst.IsSnapshot() {
+			// Handle instance snapshot volume update requests.
+			err = pool.UpdateInstanceSnapshot(inst, req.Description, req.Config, nil)
 			if err != nil {
 				return response.SmartError(err)
 			}
 		} else {
-			// Validate the configuration
-			err = storagePools.VolumeValidateConfig(d.State(), volumeName, req.Config, poolRow)
-			if err != nil {
-				return response.BadRequest(err)
-			}
-
-			err = storagePoolVolumeUpdate(d.State(), poolName, volumeName, volumeType, req.Description, req.Config)
+			// Handle instance volume update requests.
+			err = pool.UpdateInstance(inst, req.Description, req.Config, nil)
 			if err != nil {
 				return response.SmartError(err)
 			}
 		}
+	} else if volumeType == db.StoragePoolVolumeTypeImage {
+		// Handle image update requests.
+		err = pool.UpdateImage(vol.Name, req.Description, req.Config, nil)
+		if err != nil {
+			return response.SmartError(err)
+		}
+	} else {
+		return response.SmartError(fmt.Errorf("Invalid volume type"))
 	}
 
 	return response.EmptySyncResponse
@@ -1093,8 +968,7 @@ func storagePoolVolumeTypePatch(d *Daemon, r *http.Request, volumeTypeName strin
 		return response.BadRequest(fmt.Errorf("Invalid storage volume type %s", volumeTypeName))
 	}
 
-	// Get the ID of the storage pool the storage volume is supposed to be attached to.
-	poolID, poolRow, err := d.cluster.StoragePoolGet(poolName)
+	pool, err := storagePools.GetPoolByName(d.State(), poolName)
 	if err != nil {
 		return response.SmartError(err)
 	}
@@ -1104,13 +978,13 @@ func storagePoolVolumeTypePatch(d *Daemon, r *http.Request, volumeTypeName strin
 		return resp
 	}
 
-	resp = ForwardedResponseIfVolumeIsRemote(d, r, poolID, volumeName, volumeType)
+	resp = ForwardedResponseIfVolumeIsRemote(d, r, pool.ID(), volumeName, volumeType)
 	if resp != nil {
 		return resp
 	}
 
 	// Get the existing storage volume.
-	_, vol, err := d.cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, poolID)
+	_, vol, err := d.cluster.StoragePoolNodeVolumeGetType(volumeName, volumeType, pool.ID())
 	if err != nil {
 		return response.SmartError(err)
 	}
@@ -1140,28 +1014,9 @@ func storagePoolVolumeTypePatch(d *Daemon, r *http.Request, volumeTypeName strin
 		}
 	}
 
-	// Check if we can load new storage layer for pool driver type.
-	pool, err := storagePools.GetPoolByName(d.State(), poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return response.SmartError(err)
-		}
-
-		err = pool.UpdateCustomVolume(vol.Name, req.Description, req.Config, nil)
-		if err != nil {
-			return response.SmartError(err)
-		}
-	} else {
-		// Validate the configuration.
-		err = storagePools.VolumeValidateConfig(d.State(), volumeName, req.Config, poolRow)
-		if err != nil {
-			return response.BadRequest(err)
-		}
-
-		err = storagePoolVolumeUpdate(d.State(), poolName, volumeName, volumeType, req.Description, req.Config)
-		if err != nil {
-			return response.SmartError(err)
-		}
+	err = pool.UpdateCustomVolume(vol.Name, req.Description, req.Config, nil)
+	if err != nil {
+		return response.SmartError(err)
 	}
 
 	return response.EmptySyncResponse
@@ -1229,7 +1084,7 @@ func storagePoolVolumeTypeDelete(d *Daemon, r *http.Request, volumeTypeName stri
 	case storagePoolVolumeTypeImage:
 		// allowed
 	default:
-		return response.BadRequest(fmt.Errorf("storage volumes of type \"%s\" cannot be deleted with the storage api", volumeTypeName))
+		return response.BadRequest(fmt.Errorf("Storage volumes of type %q cannot be deleted with the storage API", volumeTypeName))
 	}
 
 	volumeUsedBy, err := storagePoolVolumeUsedByGet(d.State(), project, poolName, volumeName, volumeTypeName)
@@ -1238,71 +1093,26 @@ func storagePoolVolumeTypeDelete(d *Daemon, r *http.Request, volumeTypeName stri
 	}
 
 	if len(volumeUsedBy) > 0 {
-		if len(volumeUsedBy) != 1 ||
-			volumeType != storagePoolVolumeTypeImage ||
-			volumeUsedBy[0] != fmt.Sprintf(
-				"/%s/images/%s",
-				version.APIVersion,
-				volumeName) {
+		if len(volumeUsedBy) != 1 || volumeType != storagePoolVolumeTypeImage || volumeUsedBy[0] != fmt.Sprintf("/%s/images/%s", version.APIVersion, volumeName) {
 			return response.BadRequest(fmt.Errorf("The storage volume is still in use"))
 		}
 	}
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByName(d.State(), poolName)
-	if err != storageDrivers.ErrUnknownDriver {
-		if err != nil {
-			return response.SmartError(err)
-		}
-
-		switch volumeType {
-		case storagePoolVolumeTypeCustom:
-			err = pool.DeleteCustomVolume(volumeName, nil)
-		case storagePoolVolumeTypeImage:
-			err = pool.DeleteImage(volumeName, nil)
-		default:
-			return response.BadRequest(fmt.Errorf(`Storage volumes of type "%s" cannot be deleted with the storage api`, volumeTypeName))
-		}
-		if err != nil {
-			return response.SmartError(err)
-		}
-	} else {
-		s, err := storagePoolVolumeInit(d.State(), project, poolName, volumeName, volumeType)
-		if err != nil {
-			return response.NotFound(err)
-		}
-
-		switch volumeType {
-		case storagePoolVolumeTypeCustom:
-			var snapshots []db.StorageVolumeArgs
-
-			// Delete storage volume snapshots
-			snapshots, err = d.cluster.StoragePoolVolumeSnapshotsGetType(volumeName, volumeType, poolID)
-			if err != nil {
-				return response.SmartError(err)
-			}
-
-			for _, snapshot := range snapshots {
-				s, err := storagePoolVolumeInit(d.State(), project, poolName, snapshot.Name, volumeType)
-				if err != nil {
-					return response.NotFound(err)
-				}
-
-				err = s.StoragePoolVolumeSnapshotDelete()
-				if err != nil {
-					return response.SmartError(err)
-				}
-			}
+	if err != nil {
+		return response.SmartError(err)
+	}
 
-			err = s.StoragePoolVolumeDelete()
-		case storagePoolVolumeTypeImage:
-			err = s.ImageDelete(volumeName)
-		default:
-			return response.BadRequest(fmt.Errorf(`Storage volumes of type "%s" cannot be deleted with the storage api`, volumeTypeName))
-		}
-		if err != nil {
-			return response.SmartError(err)
-		}
+	switch volumeType {
+	case storagePoolVolumeTypeCustom:
+		err = pool.DeleteCustomVolume(volumeName, nil)
+	case storagePoolVolumeTypeImage:
+		err = pool.DeleteImage(volumeName, nil)
+	default:
+		return response.BadRequest(fmt.Errorf(`Storage volumes of type %q cannot be deleted with the storage API`, volumeTypeName))
+	}
+	if err != nil {
+		return response.SmartError(err)
 	}
 
 	return response.EmptySyncResponse

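The deletion dispatch that survives is a plain switch over the volume type; everything else routes through the pool. Sketch (constants and pool methods as used in the diff; deleteVolumeByType is a hypothetical wrapper):

package main

import (
	"fmt"

	storagePools "github.com/lxc/lxd/lxd/storage"
)

// deleteVolumeByType allows only custom and image volumes to be removed
// through the storage API, delegating the work to the pool.
func deleteVolumeByType(pool storagePools.Pool, volumeType int, volumeName string) error {
	switch volumeType {
	case storagePoolVolumeTypeCustom:
		return pool.DeleteCustomVolume(volumeName, nil)
	case storagePoolVolumeTypeImage:
		return pool.DeleteImage(volumeName, nil)
	default:
		return fmt.Errorf("Storage volumes of type %d cannot be deleted with the storage API", volumeType)
	}
}
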
From cafda17ca32fb582c0db895d1ce1209968e404f5 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 14:11:38 +0000
Subject: [PATCH 33/36] lxd/storage: Removes old storage loader

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage.go | 153 ++++++++++---------------------------------------
 1 file changed, 31 insertions(+), 122 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 45ea9fa6d5..c03e29f325 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -26,7 +26,6 @@ import (
 	"github.com/lxc/lxd/shared/idmap"
 	"github.com/lxc/lxd/shared/ioprogress"
 	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/units"
 	"github.com/lxc/lxd/shared/version"
 )
 
@@ -638,26 +637,13 @@ func setupStorageDriver(s *state.State, forceCheck bool) error {
 		errPrefix := fmt.Sprintf("Failed initializing storage pool %q", poolName)
 
 		pool, err := storagePools.GetPoolByName(s, poolName)
-		if err != storageDrivers.ErrUnknownDriver {
-			if err != nil {
-				return errors.Wrap(err, errPrefix)
-			}
-
-			_, err = pool.Mount()
-			if err != nil {
-				return errors.Wrap(err, errPrefix)
-			}
-		} else {
-			s, err := storagePoolInit(s, poolName)
-			if err != nil {
-				logger.Errorf("Error initializing storage pool \"%s\": %s, correct functionality of the storage pool cannot be guaranteed", pool, err)
-				continue
-			}
+		if err != nil {
+			return errors.Wrap(err, errPrefix)
+		}
 
-			err = s.StoragePoolCheck()
-			if err != nil {
-				return errors.Wrap(err, errPrefix)
-			}
+		_, err = pool.Mount()
+		if err != nil {
+			return errors.Wrap(err, errPrefix)
 		}
 	}
 
@@ -730,86 +716,39 @@ func storageVolumeMount(state *state.State, poolName string, volumeName string,
 
 	volumeType, _ := storagePools.VolumeTypeNameToType(volumeTypeName)
 	pool, err := storagePools.GetPoolByName(state, poolName)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		// Mount the storage volume
-		ourMount, err := pool.MountCustomVolume(volumeName, nil)
-		if err != nil {
-			return err
-		}
-
-		revert := true
-		if ourMount {
-			defer func() {
-				if !revert {
-					return
-				}
-
-				pool.UnmountCustomVolume(volumeName, nil)
-			}()
-		}
-
-		// Custom storage volumes do not currently support projects, so hardcode "default" project.
-		err = storagePoolVolumeAttachPrepare(state, poolName, volumeName, volumeType, c)
-		if err != nil {
-			return err
-		}
-
-		revert = false
-	} else {
-		// Load the volume
-		s, err := storageInit(state, "default", poolName, volumeName, volumeType)
-		if err != nil {
-			return err
-		}
-
-		// Mount the storage volume
-		ourMount, err := s.StoragePoolVolumeMount()
-		if err != nil {
-			return err
-		}
-
-		revert := true
-		if ourMount {
-			defer func() {
-				if !revert {
-					return
-				}
+	// Mount the storage volume
+	ourMount, err := pool.MountCustomVolume(volumeName, nil)
+	if err != nil {
+		return err
+	}
 
-				s.StoragePoolVolumeUmount()
-			}()
-		}
+	revert := true
+	if ourMount {
+		defer func() {
+			if !revert {
+				return
+			}
 
-		// Custom storage volumes do not currently support projects, so hardcode "default" project.
-		err = storagePoolVolumeAttachPrepare(state, poolName, volumeName, volumeType, c)
-		if err != nil {
-			return err
-		}
+			pool.UnmountCustomVolume(volumeName, nil)
+		}()
+	}
 
-		revert = false
+	// Custom storage volumes do not currently support projects, so hardcode "default" project.
+	err = storagePoolVolumeAttachPrepare(state, poolName, volumeName, volumeType, c)
+	if err != nil {
+		return err
 	}
 
+	revert = false
 	return nil
 }
 
 // storageVolumeUmount unmounts a storage volume on a pool.
 func storageVolumeUmount(state *state.State, poolName string, volumeName string, volumeType int) error {
 	pool, err := storagePools.GetPoolByName(state, poolName)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		_, err = pool.UnmountCustomVolume(volumeName, nil)
-		if err != nil {
-			return err
-		}
-	} else {
-		// Custom storage volumes do not currently support projects, so hardcode "default" project.
-		s, err := storagePoolVolumeInit(state, "default", poolName, volumeName, volumeType)
-		if err != nil {
-			return err
-		}
-
-		_, err = s.StoragePoolVolumeUmount()
-		if err != nil {
-			return err
-		}
+	_, err = pool.UnmountCustomVolume(volumeName, nil)
+	if err != nil {
+		return err
 	}
 
 	return nil
@@ -819,39 +758,9 @@ func storageVolumeUmount(state *state.State, poolName string, volumeName string,
 // return false indicating that the quota needs to be stored in volatile to be applied on next boot.
 func storageRootFSApplyQuota(state *state.State, inst instance.Instance, size string) error {
 	pool, err := storagePools.GetPoolByInstance(state, inst)
-	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
-		err = pool.SetInstanceQuota(inst, size, nil)
-		if err != nil {
-			return err
-		}
-	} else {
-		c, ok := inst.(*containerLXC)
-		if !ok {
-			return fmt.Errorf("Received non-LXC container instance")
-		}
-
-		err := c.initStorage()
-		if err != nil {
-			return errors.Wrap(err, "Initialize storage")
-		}
-
-		storageTypeName := c.storage.GetStorageTypeName()
-		storageIsReady := c.storage.ContainerStorageReady(c)
-
-		// If we cannot apply the quota now, then return false as needs to be applied on next boot.
-		if (storageTypeName == "lvm" || storageTypeName == "ceph") && c.IsRunning() || !storageIsReady {
-			return storagePools.ErrRunningQuotaResizeNotSupported
-		}
-
-		newSizeBytes, err := units.ParseByteSizeString(size)
-		if err != nil {
-			return err
-		}
-
-		err = c.storage.StorageEntitySetQuota(storagePoolVolumeTypeContainer, newSizeBytes, c)
-		if err != nil {
-			return errors.Wrap(err, "Set storage quota")
-		}
+	err = pool.SetInstanceQuota(inst, size, nil)
+	if err != nil {
+		return err
 	}
 
 	return nil

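The mount path keeps its careful unwind: unmount on failure, but only if this call performed the mount in the first place. A sketch of that shape, with attach standing in for the storagePoolVolumeAttachPrepare() step:

package main

import (
	storagePools "github.com/lxc/lxd/lxd/storage"
)

// mountCustomVolume mounts a custom volume, runs attach, and unmounts
// again on failure, but only when it was this call that mounted it.
func mountCustomVolume(pool storagePools.Pool, volumeName string, attach func() error) error {
	ourMount, err := pool.MountCustomVolume(volumeName, nil)
	if err != nil {
		return err
	}

	revert := true
	if ourMount {
		defer func() {
			if revert {
				pool.UnmountCustomVolume(volumeName, nil)
			}
		}()
	}

	err = attach()
	if err != nil {
		return err
	}

	revert = false
	return nil
}
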
From 3dae21cf83f4f6ca4853a59ea882ea9f2d036be8 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 14:14:26 +0000
Subject: [PATCH 34/36] lxd/instance/drivers/driver/qemu: Removes storage layer
 transition workaround

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/instance/drivers/driver_qemu.go | 33 ++---------------------------
 1 file changed, 2 insertions(+), 31 deletions(-)

diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 0ceba5bca9..3380ed286e 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -2582,37 +2582,8 @@ func (vm *qemu) Delete() error {
 	// Attempt to initialize storage interface for the instance.
 	pool, err := vm.getStoragePool()
 	if err != nil && err != db.ErrNoSuchObject {
-		// Because of the way QemuCreate creates the storage volume record before loading
-		// the storage pool driver, Delete() may be called as part of a revertion if the
-		// pool being used to create the VM on doesn't support VMs. This deletion will then
-		// fail too, so we need to detect this scenario and just remove the storage volume
-		// DB record.
-		// TODO: This can be removed once all pool drivers are ported to new storage layer.
-		if err == storageDrivers.ErrUnknownDriver || err == storageDrivers.ErrNotImplemented {
-			logger.Warn("Unsupported storage pool type, removing DB volume record", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
-			// Remove the volume record from the database. This deletion would
-			// normally be handled by DeleteInstance() call below but since the storage
-			// driver (new storage) is not implemented, we need to do it here manually.
-			poolName, err := vm.StoragePool()
-			if err != nil {
-				return err
-			}
-
-			poolID, err := vm.state.Cluster.StoragePoolGetID(poolName)
-			if err != nil {
-				return err
-			}
-
-			err = vm.state.Cluster.StoragePoolVolumeDelete(vm.Project(), vm.Name(), db.StoragePoolVolumeTypeVM, poolID)
-			if err != nil {
-				return err
-			}
-		} else {
-			return err
-		}
-	}
-
-	if pool != nil {
+		return err
+	} else if pool != nil {
 		if vm.IsSnapshot() {
 			if !isImport {
 				// Remove snapshot volume and database record.

From 1d014591b54fd08224ce0e5d7a10208ba76ef98e Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 14:22:47 +0000
Subject: [PATCH 35/36] lxd/container/lxc: Makes Delete pool load logic same as
 VM type

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_lxc.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 26f1253331..e2827b564e 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -3432,7 +3432,7 @@ func (c *containerLXC) Delete() error {
 	pool, err := storagePools.GetPoolByInstance(c.state, c)
 	if err != nil && err != db.ErrNoSuchObject {
 		return err
-	} else if err != db.ErrNoSuchObject {
+	} else if pool != nil {
 		// Check if we're dealing with "lxd import".
 		// "lxd import" is used for disaster recovery, where you already have a container
 		// and snapshots on disk but no DB entry. As such if something has gone wrong during

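Both Delete() paths now treat a missing volume record as "no pool to clean up" rather than an error. Sketched as a hypothetical helper over the calls used above:

package main

import (
	"github.com/lxc/lxd/lxd/db"
	"github.com/lxc/lxd/lxd/instance"
	"github.com/lxc/lxd/lxd/state"
	storagePools "github.com/lxc/lxd/lxd/storage"
)

// instancePool returns the instance's pool if one exists. A missing DB
// record is not fatal: pool simply comes back nil and the caller skips
// the storage cleanup.
func instancePool(s *state.State, inst instance.Instance) (storagePools.Pool, error) {
	pool, err := storagePools.GetPoolByInstance(s, inst)
	if err != nil && err != db.ErrNoSuchObject {
		return nil, err
	}

	return pool, nil
}
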
From 1cd02de4c6abafe3ac8bc9acba9ad14c84a1052b Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Mon, 24 Feb 2020 14:23:15 +0000
Subject: [PATCH 36/36] lxd: Storage loader comments

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/backup.go                       | 1 -
 lxd/containers_post.go              | 1 -
 lxd/daemon_storage.go               | 2 +-
 lxd/instance/drivers/driver_qemu.go | 3 +--
 lxd/migrate_container.go            | 1 -
 lxd/migrate_storage_volumes.go      | 2 --
 lxd/resources.go                    | 1 -
 lxd/storage_volumes_snapshot.go     | 1 -
 8 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/lxd/backup.go b/lxd/backup.go
index 6583addc43..74e03ccc2a 100644
--- a/lxd/backup.go
+++ b/lxd/backup.go
@@ -62,7 +62,6 @@ func backupCreate(s *state.State, args db.InstanceBackupArgs, sourceInst instanc
 	}
 	defer os.RemoveAll(tmpPath)
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(s, sourceInst)
 	if err != nil {
 		return errors.Wrap(err, "Load instance storage pool")
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 8e9471d070..54203cbf82 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -261,7 +261,6 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
 	instanceOnly := req.Source.InstanceOnly || req.Source.ContainerOnly
 
 	if !req.Source.Refresh {
-		// Check if we can load new storage layer for pool driver type.
 		_, err := storagePools.GetPoolByName(d.State(), storagePool)
 		if err != nil {
 			return response.InternalError(err)
diff --git a/lxd/daemon_storage.go b/lxd/daemon_storage.go
index 52635a5c5b..4ff8b30fde 100644
--- a/lxd/daemon_storage.go
+++ b/lxd/daemon_storage.go
@@ -45,12 +45,12 @@ func daemonStorageMount(s *state.State) error {
 		poolName := fields[0]
 		volumeName := fields[1]
 
-		// Mount volume.
 		pool, err := storagePools.GetPoolByName(s, poolName)
 		if err != nil {
 			return err
 		}
 
+		// Mount volume.
 		_, err = pool.MountCustomVolume(volumeName, nil)
 		if err != nil {
 			return errors.Wrapf(err, "Failed to mount storage volume \"%s\"", source)
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 3380ed286e..a8f0e41006 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -1729,7 +1729,7 @@ func (vm *qemu) Restore(source instance.Instance, stateful bool) error {
 
 	var ctxMap log.Ctx
 
-	// Load the storage driver
+	// Load the storage driver.
 	pool, err := storagePools.GetPoolByInstance(vm.state, vm)
 	if err != nil {
 		return err
@@ -1904,7 +1904,6 @@ func (vm *qemu) Rename(newName string) error {
 	// Clean things up.
 	vm.cleanup()
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(vm.state, vm)
 	if err != nil {
 		return errors.Wrap(err, "Load instance storage pool")
diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 68fc01ebcc..f242b42eb5 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -342,7 +342,6 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 	var offerHeader migration.MigrationHeader
 	var poolMigrationTypes []migration.Type
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByInstance(state, s.instance)
 	if err != nil {
 		return err
diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index 5bc36dd1e7..fdf17d7f1b 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -43,7 +43,6 @@ func (s *migrationSourceWs) DoStorage(state *state.State, poolName string, volNa
 	var offerHeader migration.MigrationHeader
 	var poolMigrationTypes []migration.Type
 
-	// Check if sending storage pool supports new storage layer.
 	pool, err := storagePools.GetPoolByName(state, poolName)
 	if err != nil {
 		return err
@@ -300,7 +299,6 @@ func (c *migrationSink) DoStorage(state *state.State, poolName string, req *api.
 	// The migration header to be sent back to source with our target options.
 	var respHeader migration.MigrationHeader
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByName(state, poolName)
 	if err != nil {
 		return err
diff --git a/lxd/resources.go b/lxd/resources.go
index 3306655b3e..78b257af9f 100644
--- a/lxd/resources.go
+++ b/lxd/resources.go
@@ -54,7 +54,6 @@ func storagePoolResourcesGet(d *Daemon, r *http.Request) response.Response {
 	poolName := mux.Vars(r)["name"]
 	var res *api.ResourcesStoragePool
 
-	// Check if we can load new storage layer for pool driver type.
 	pool, err := storagePools.GetPoolByName(d.State(), poolName)
 	if err != nil {
 		return response.InternalError(err)
diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index fc40abb748..0325936ba2 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -462,7 +462,6 @@ func storagePoolVolumeSnapshotTypeDelete(d *Daemon, r *http.Request) response.Re
 	}
 
 	snapshotDelete := func(op *operations.Operation) error {
-		// Check if we can load new storage layer for pool driver type.
 		pool, err := storagePools.GetPoolByName(d.State(), poolName)
 		if err != nil {
 			return err

