[lxc-devel] [lxd/master] Storage: Removes legacy storage interface

tomponline on Github lxc-bot at linuxcontainers.org
Thu Feb 27 16:28:33 UTC 2020


A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 323 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20200227/bda33af8/attachment-0001.bin>
-------------- next part --------------
From 05d9bf036f6e171b092176eaae89001844d12e19 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:50:30 +0000
Subject: [PATCH 01/13] lxd/storage: Removes unused files

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_ceph.go           | 3069 ---------------------------------
 lxd/storage_ceph_utils.go     | 2091 ----------------------
 lxd/storage_migration_ceph.go |  228 ---
 lxd/storage_mock.go           |  263 ---
 lxd/storage_shared.go         |   88 -
 5 files changed, 5739 deletions(-)
 delete mode 100644 lxd/storage_ceph.go
 delete mode 100644 lxd/storage_ceph_utils.go
 delete mode 100644 lxd/storage_migration_ceph.go
 delete mode 100644 lxd/storage_mock.go
 delete mode 100644 lxd/storage_shared.go

diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
deleted file mode 100644
index bddb5dd6bf..0000000000
--- a/lxd/storage_ceph.go
+++ /dev/null
@@ -1,3069 +0,0 @@
-package main
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"strings"
-
-	"github.com/gorilla/websocket"
-	"github.com/pkg/errors"
-	"golang.org/x/sys/unix"
-
-	"github.com/lxc/lxd/lxd/backup"
-	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/instance/instancetype"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/lxd/rsync"
-	driver "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/ioprogress"
-	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/units"
-)
-
-type storageCeph struct {
-	ClusterName     string
-	OSDPoolName     string
-	OSDDataPoolName string
-	UserName        string
-	PGNum           string
-	storageShared
-}
-
-var cephVersion = ""
-
-func (s *storageCeph) StorageCoreInit() error {
-	s.sType = storageTypeCeph
-	typeName, err := storageTypeToString(s.sType)
-	if err != nil {
-		return err
-	}
-	s.sTypeName = typeName
-
-	if cephVersion != "" {
-		s.sTypeVersion = cephVersion
-		return nil
-	}
-
-	msg, err := shared.RunCommand("rbd", "--version")
-	if err != nil {
-		return fmt.Errorf("Error getting CEPH version: %s", err)
-	}
-	s.sTypeVersion = strings.TrimSpace(msg)
-	cephVersion = s.sTypeVersion
-
-	return nil
-}
-
-func (s *storageCeph) StoragePoolInit() error {
-	var err error
-
-	err = s.StorageCoreInit()
-	if err != nil {
-		return errors.Wrap(err, "Storage pool init")
-	}
-
-	// set cluster name
-	if s.pool.Config["ceph.cluster_name"] != "" {
-		s.ClusterName = s.pool.Config["ceph.cluster_name"]
-	} else {
-		s.ClusterName = "ceph"
-	}
-
-	// set osd pool name
-	if s.pool.Config["ceph.osd.pool_name"] != "" {
-		s.OSDPoolName = s.pool.Config["ceph.osd.pool_name"]
-	}
-
-	// set osd data pool name
-	if s.pool.Config["ceph.osd.data_pool_name"] != "" {
-		s.OSDDataPoolName = s.pool.Config["ceph.osd.data_pool_name"]
-	}
-
-	// set ceph user name
-	if s.pool.Config["ceph.user.name"] != "" {
-		s.UserName = s.pool.Config["ceph.user.name"]
-	} else {
-		s.UserName = "admin"
-	}
-
-	// set default placement group number
-	if s.pool.Config["ceph.osd.pg_num"] != "" {
-		_, err = units.ParseByteSizeString(s.pool.Config["ceph.osd.pg_num"])
-		if err != nil {
-			return err
-		}
-		s.PGNum = s.pool.Config["ceph.osd.pg_num"]
-	} else {
-		s.PGNum = "32"
-	}
-
-	return nil
-}
-
-func (s *storageCeph) StoragePoolCheck() error {
-	logger.Debugf(`Checking CEPH storage pool "%s" (noop)`, s.pool.Name)
-	logger.Debugf(`Checked CEPH storage pool "%s" (noop)`, s.pool.Name)
-	return nil
-}
-
-func (s *storageCeph) StoragePoolCreate() error {
-	logger.Infof(`Creating CEPH OSD storage pool "%s" in cluster "%s"`,
-		s.pool.Name, s.ClusterName)
-
-	revert := true
-
-	s.pool.Config["volatile.initial_source"] = s.pool.Config["source"]
-
-	// sanity check
-	if s.pool.Config["source"] != "" &&
-		s.pool.Config["ceph.osd.pool_name"] != "" &&
-		s.pool.Config["source"] != s.pool.Config["ceph.osd.pool_name"] {
-		msg := fmt.Sprintf(`The "source" and "ceph.osd.pool_name" ` +
-			`property must not differ for CEPH OSD storage pools`)
-		logger.Errorf(msg)
-		return fmt.Errorf(msg)
-	}
-
-	// use an existing OSD pool
-	if s.pool.Config["source"] != "" {
-		s.OSDPoolName = s.pool.Config["source"]
-		s.pool.Config["ceph.osd.pool_name"] = s.pool.Config["source"]
-	}
-
-	if s.pool.Config["ceph.osd.pool_name"] == "" {
-		s.pool.Config["ceph.osd.pool_name"] = s.pool.Name
-		s.pool.Config["source"] = s.pool.Name
-		s.OSDPoolName = s.pool.Name
-	}
-
-	if !cephOSDPoolExists(s.ClusterName, s.OSDPoolName, s.UserName) {
-		logger.Debugf(`CEPH OSD storage pool "%s" does not exist`, s.OSDPoolName)
-
-		// Create new osd pool
-		msg, err := shared.TryRunCommand("ceph", "--name", fmt.Sprintf("client.%s", s.UserName), "--cluster", s.ClusterName, "osd", "pool", "create", s.OSDPoolName, s.PGNum)
-		if err != nil {
-			logger.Errorf(`Failed to create CEPH osd storage pool "%s" in cluster "%s": %s`, s.OSDPoolName, s.ClusterName, msg)
-			return err
-		}
-		logger.Debugf(`Created CEPH osd storage pool "%s" in cluster "%s"`, s.OSDPoolName, s.ClusterName)
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err := cephOSDPoolDestroy(s.ClusterName, s.OSDPoolName, s.UserName)
-			if err != nil {
-				logger.Warnf(`Failed to delete ceph storage pool "%s" in cluster "%s": %s`, s.OSDPoolName, s.ClusterName, err)
-			}
-		}()
-
-		// Create dummy storage volume. Other LXD instances will use this to detect whether this osd pool is already in use by another LXD instance.
-		err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, s.OSDPoolName, "lxd", "0", s.UserName, s.OSDDataPoolName)
-		if err != nil {
-			logger.Errorf(`Failed to create RBD storage volume "%s" on storage pool "%s": %s`, s.pool.Name, s.pool.Name, err)
-			return err
-		}
-		s.pool.Config["volatile.pool.pristine"] = "true"
-		logger.Debugf(`Created RBD storage volume "%s" on storage pool "%s"`, s.pool.Name, s.pool.Name)
-	} else {
-		logger.Debugf(`CEPH OSD storage pool "%s" does exist`, s.OSDPoolName)
-
-		ok := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, s.OSDPoolName, "lxd", s.UserName)
-		s.pool.Config["volatile.pool.pristine"] = "false"
-		if ok {
-			if s.pool.Config["ceph.osd.force_reuse"] == "" || !shared.IsTrue(s.pool.Config["ceph.osd.force_reuse"]) {
-				return fmt.Errorf("CEPH OSD storage pool \"%s\" in cluster \"%s\" seems to be in use by another LXD instance. Use \"ceph.osd.force_reuse=true\" to force.", s.pool.Name, s.ClusterName)
-			}
-		}
-
-		// Use existing osd pool
-		msg, err := shared.RunCommand("ceph", "--name", fmt.Sprintf("client.%s", s.UserName), "--cluster", s.ClusterName, "osd", "pool", "get", s.OSDPoolName, "pg_num")
-		if err != nil {
-			logger.Errorf(`Failed to retrieve number of placement groups for CEPH osd storage pool "%s" in cluster "%s": %s`, s.OSDPoolName, s.ClusterName, msg)
-			return err
-		}
-
-		logger.Debugf(`Retrieved number of placement groups or CEPH osd storage pool "%s" in cluster "%s"`, s.OSDPoolName, s.ClusterName)
-		idx := strings.Index(msg, "pg_num:")
-		if idx == -1 {
-			logger.Errorf(`Failed to parse number of placement groups for CEPH osd storage pool "%s" in cluster "%s": %s`, s.OSDPoolName, s.ClusterName, msg)
-		}
-
-		msg = msg[(idx + len("pg_num:")):]
-		msg = strings.TrimSpace(msg)
-
-		// It is ok to update the pool configuration since storage pool
-		// creation via API is implemented such that the storage pool is
-		// checked for a changed config after this function returns and
-		// if so the db for it is updated.
-		s.pool.Config["ceph.osd.pg_num"] = msg
-	}
-
-	if s.pool.Config["source"] == "" {
-		s.pool.Config["source"] = s.OSDPoolName
-	}
-
-	// set immutable ceph.cluster_name property
-	if s.pool.Config["ceph.cluster_name"] == "" {
-		s.pool.Config["ceph.cluster_name"] = "ceph"
-	}
-
-	// set immutable ceph.osd.pool_name property
-	if s.pool.Config["ceph.osd.pool_name"] == "" {
-		s.pool.Config["ceph.osd.pool_name"] = s.pool.Name
-	}
-
-	if s.pool.Config["ceph.osd.pg_num"] == "" {
-		s.pool.Config["ceph.osd.pg_num"] = "32"
-	}
-
-	// Create the mountpoint for the storage pool.
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-	err := os.MkdirAll(poolMntPoint, 0711)
-	if err != nil {
-		logger.Errorf(`Failed to create mountpoint "%s" for ceph storage pool "%s" in cluster "%s": %s`, poolMntPoint, s.OSDPoolName, s.ClusterName, err)
-		return err
-	}
-	logger.Debugf(`Created mountpoint "%s" for ceph storage pool "%s" in cluster "%s"`, poolMntPoint, s.OSDPoolName, s.ClusterName)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := os.Remove(poolMntPoint)
-		if err != nil {
-			logger.Errorf(`Failed to delete mountpoint "%s" for ceph storage pool "%s" in cluster "%s": %s`, poolMntPoint, s.OSDPoolName, s.ClusterName, err)
-		}
-	}()
-
-	logger.Infof(`Created CEPH OSD storage pool "%s" in cluster "%s"`,
-		s.pool.Name, s.ClusterName)
-
-	revert = false
-
-	return nil
-}
-
-func (s *storageCeph) StoragePoolDelete() error {
-	logger.Infof(`Deleting CEPH OSD storage pool "%s" in cluster "%s"`,
-		s.pool.Name, s.ClusterName)
-
-	// test if pool exists
-	poolExists := cephOSDPoolExists(s.ClusterName, s.OSDPoolName, s.UserName)
-	if !poolExists {
-		logger.Warnf(`CEPH osd storage pool "%s" does not exist in cluster "%s"`, s.OSDPoolName, s.ClusterName)
-	}
-
-	// Check whether we own the pool and only remove in this case.
-	if s.pool.Config["volatile.pool.pristine"] != "" &&
-		shared.IsTrue(s.pool.Config["volatile.pool.pristine"]) {
-		logger.Debugf(`Detected that this LXD instance is the owner of the CEPH osd storage pool "%s" in cluster "%s"`, s.OSDPoolName, s.ClusterName)
-
-		// Delete the osd pool.
-		if poolExists {
-			err := cephOSDPoolDestroy(s.ClusterName, s.OSDPoolName,
-				s.UserName)
-			if err != nil {
-				logger.Errorf(`Failed to delete CEPH OSD storage pool "%s" in cluster "%s": %s`, s.pool.Name, s.ClusterName, err)
-				return err
-			}
-		}
-		logger.Debugf(`Deleted CEPH OSD storage pool "%s" in cluster "%s"`,
-			s.pool.Name, s.ClusterName)
-	}
-
-	// Delete the mountpoint for the storage pool.
-	poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
-	if shared.PathExists(poolMntPoint) {
-		err := os.RemoveAll(poolMntPoint)
-		if err != nil {
-			logger.Errorf(`Failed to delete mountpoint "%s" for CEPH osd storage pool "%s" in cluster "%s": %s`, poolMntPoint, s.OSDPoolName, s.ClusterName, err)
-			return err
-		}
-		logger.Debugf(`Deleted mountpoint "%s" for CEPH osd storage pool "%s" in cluster "%s"`, poolMntPoint, s.OSDPoolName, s.ClusterName)
-	}
-
-	logger.Infof(`Deleted CEPH OSD storage pool "%s" in cluster "%s"`,
-		s.pool.Name, s.ClusterName)
-	return nil
-}
-
-func (s *storageCeph) StoragePoolMount() (bool, error) {
-	// Yay, osd pools are not mounted.
-	return true, nil
-}
-
-func (s *storageCeph) StoragePoolUmount() (bool, error) {
-	// Yay, osd pools are not mounted.
-	return true, nil
-}
-
-func (s *storageCeph) GetContainerPoolInfo() (int64, string, string) {
-	return s.poolID, s.pool.Name, s.OSDPoolName
-}
-
-func (s *storageCeph) StoragePoolVolumeCreate() error {
-	logger.Debugf(`Creating RBD storage volume "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-
-	revert := true
-
-	// get size
-	RBDSize, err := s.getRBDSize()
-	if err != nil {
-		logger.Errorf(`Failed to retrieve size of RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Retrieved size "%s" of RBD storage volume "%s" on storage pool "%s"`, RBDSize, s.volume.Name, s.pool.Name)
-
-	// create volume
-	err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, s.volume.Name,
-		storagePoolVolumeTypeNameCustom, RBDSize, s.UserName, s.OSDDataPoolName)
-	if err != nil {
-		logger.Errorf(`Failed to create RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Created RBD storage volume "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName,
-			s.volume.Name, storagePoolVolumeTypeNameCustom, s.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to delete RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-		}
-	}()
-
-	RBDDevPath, err := cephRBDVolumeMap(s.ClusterName, s.OSDPoolName,
-		s.volume.Name, storagePoolVolumeTypeNameCustom, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to map RBD storage volume for "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Mapped RBD storage volume for "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-
-	defer func() {
-		err := cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName,
-			s.volume.Name, storagePoolVolumeTypeNameCustom,
-			s.UserName, true)
-		if err != nil {
-			logger.Warnf(`Failed to unmap RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-		}
-	}()
-
-	// get filesystem
-	RBDFilesystem := s.getRBDFilesystem()
-	logger.Debugf(`Retrieved filesystem type "%s" of RBD storage volume "%s" on storage pool "%s"`, RBDFilesystem, s.volume.Name, s.pool.Name)
-
-	output, err := makeFSType(RBDDevPath, RBDFilesystem, nil)
-	if err != nil {
-		logger.Errorf(`Failed to create filesystem type "%s" on device path "%s" for RBD storage volume "%s" on storage pool "%s": %v (%s)`, RBDFilesystem, RBDDevPath, s.volume.Name, s.pool.Name, err, output)
-		return err
-	}
-	logger.Debugf(`Created filesystem type "%s" on device path "%s" for RBD storage volume "%s" on storage pool "%s"`, RBDFilesystem, RBDDevPath, s.volume.Name, s.pool.Name)
-
-	volumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	err = os.MkdirAll(volumeMntPoint, 0711)
-	if err != nil {
-		logger.Errorf(`Failed to create mountpoint "%s" for RBD storage volume "%s" on storage pool "%s": %s"`, volumeMntPoint, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Created mountpoint "%s" for RBD storage volume "%s" on storage pool "%s"`, volumeMntPoint, s.volume.Name, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := os.Remove(volumeMntPoint)
-		if err != nil {
-			logger.Warnf(`Failed to delete mountpoint "%s" for RBD storage volume "%s" on storage pool "%s": %s"`, volumeMntPoint, s.volume.Name, s.pool.Name, err)
-		}
-	}()
-
-	// Apply quota
-	if s.volume.Config["size"] != "" {
-		size, err := units.ParseByteSizeString(s.volume.Config["size"])
-		if err != nil {
-			return err
-		}
-
-		err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf(`Created RBD storage volume "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-
-	revert = false
-
-	return nil
-}
-
-func (s *storageCeph) StoragePoolVolumeDelete() error {
-	logger.Debugf(`Deleting RBD storage volume "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-
-	// Delete all snapshots
-	snapshots, err := driver.VolumeSnapshotsGet(s.s, s.pool.Name, s.volume.Name, storagePoolVolumeTypeCustom)
-	if err != nil {
-		return err
-	}
-
-	for _, snap := range snapshots {
-		err := s.doPoolVolumeSnapshotDelete(snap.Name)
-		if err != nil {
-			return err
-		}
-	}
-
-	volumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	if shared.IsMountPoint(volumeMntPoint) {
-		err := storageDrivers.TryUnmount(volumeMntPoint, unix.MNT_DETACH)
-		if err != nil {
-			logger.Errorf(`Failed to unmount RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-		}
-		logger.Debugf(`Unmounted RBD storage volume "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-	}
-
-	rbdVolumeExists := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName,
-		s.volume.Name, storagePoolVolumeTypeNameCustom, s.UserName)
-
-	// delete
-	if rbdVolumeExists {
-		ret := cephContainerDelete(s.ClusterName, s.OSDPoolName, s.volume.Name,
-			storagePoolVolumeTypeNameCustom, s.UserName)
-		if ret < 0 {
-			msg := fmt.Sprintf(`Failed to delete RBD storage volume "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-			logger.Errorf(msg)
-			return fmt.Errorf(msg)
-		}
-		logger.Debugf(`Deleted RBD storage volume "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-	}
-
-	err = s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		s.volume.Name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for RBD storage volume "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-	}
-	logger.Debugf(`Deleted database entry for RBD storage volume "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-
-	if shared.PathExists(volumeMntPoint) {
-		err = os.Remove(volumeMntPoint)
-		if err != nil {
-			logger.Errorf(`Failed to delete mountpoint "%s" for RBD storage volume "%s" on storage pool "%s": %s"`, volumeMntPoint, s.volume.Name, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Deleted mountpoint "%s" for RBD storage volume "%s" on storage pool "%s"`, volumeMntPoint, s.volume.Name, s.pool.Name)
-	}
-
-	logger.Debugf(`Deleted RBD storage volume "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageCeph) StoragePoolVolumeMount() (bool, error) {
-	logger.Debugf(`Mounting RBD storage volume "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-
-	RBDFilesystem := s.getRBDFilesystem()
-	volumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-	customMountLockID := getCustomMountLockID(s.pool.Name, s.volume.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf(`Received value over semaphore. This should not have happened`)
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in mounting the storage volume.
-		logger.Debugf(`RBD storage volume "%s" on storage pool "%s" appears to be already mounted`, s.volume.Name, s.pool.Name)
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[customMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var ret int
-	var customerr error
-	ourMount := false
-	RBDDevPath := ""
-	if !shared.IsMountPoint(volumeMntPoint) {
-		if !shared.PathExists(volumeMntPoint) {
-			err := os.MkdirAll(volumeMntPoint, 0711)
-			if err != nil {
-				return false, err
-			}
-		}
-
-		RBDDevPath, ret = getRBDMappedDevPath(s.ClusterName, s.OSDPoolName,
-			storagePoolVolumeTypeNameCustom, s.volume.Name, true,
-			s.UserName)
-		mountFlags, mountOptions := resolveMountOptions(s.getRBDMountOptions())
-		customerr = storageDrivers.TryMount(
-			RBDDevPath,
-			volumeMntPoint,
-			RBDFilesystem,
-			mountFlags,
-			mountOptions)
-		ourMount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, customMountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if customerr != nil || ret < 0 {
-		logger.Errorf(`Failed to mount RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, customerr)
-		return false, customerr
-	}
-
-	logger.Debugf(`Mounted RBD storage volume "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-	return ourMount, nil
-}
-
-func (s *storageCeph) StoragePoolVolumeUmount() (bool, error) {
-	logger.Debugf(`Unmounting RBD storage volume "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-
-	volumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-	customMountLockID := getCustomUmountLockID(s.pool.Name, s.volume.Name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf(`Received value over semaphore. This should not have happened`)
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in unmounting the storage volume.
-		logger.Debugf(`RBD storage volume "%s" on storage pool "%s" appears to be already unmounted`, s.volume.Name, s.pool.Name)
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[customMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var customerr error
-	ourUmount := false
-	if shared.IsMountPoint(volumeMntPoint) {
-		customerr = storageDrivers.TryUnmount(volumeMntPoint, unix.MNT_DETACH)
-		ourUmount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, customMountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if customerr != nil {
-		logger.Errorf(`Failed to unmount RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, customerr)
-		return false, customerr
-	}
-
-	if ourUmount {
-		// Attempt to unmap
-		err := cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName,
-			s.volume.Name, storagePoolVolumeTypeNameCustom,
-			s.UserName, true)
-		if err != nil {
-			logger.Errorf(`Failed to unmap RBD storage volume for container "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-			return ourUmount, err
-		}
-	}
-
-	logger.Debugf(`Unmounted RBD storage volume "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-	return ourUmount, nil
-}
-
-func (s *storageCeph) StoragePoolVolumeUpdate(writable *api.StorageVolumePut, changedConfig []string) error {
-	if writable.Restore != "" {
-		logger.Infof(`Restoring CEPH storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-
-		ourUmount, err := s.StoragePoolVolumeUmount()
-		if err != nil {
-			return err
-		}
-		if ourUmount {
-			defer s.StoragePoolVolumeMount()
-		}
-
-		prefixedSourceSnapOnlyName := fmt.Sprintf("snapshot_%s", writable.Restore)
-		err = cephRBDVolumeRestore(s.ClusterName, s.OSDPoolName,
-			s.volume.Name, storagePoolVolumeTypeNameCustom,
-			prefixedSourceSnapOnlyName, s.UserName)
-		if err != nil {
-			return err
-		}
-
-		logger.Infof(`Restored CEPH storage volume "%s" from snapshot "%s"`,
-			s.volume.Name, writable.Restore)
-		return nil
-	}
-
-	logger.Infof(`Updating CEPH storage volume "%s"`, s.volume.Name)
-
-	changeable := changeableStoragePoolVolumeProperties["ceph"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolVolumeError(unchangeable, "ceph")
-	}
-
-	if shared.StringInSlice("size", changedConfig) {
-		if s.volume.Type != storagePoolVolumeTypeNameCustom {
-			return updateStoragePoolVolumeError([]string{"size"}, "ceph")
-		}
-
-		if s.volume.Config["size"] != writable.Config["size"] {
-			size, err := units.ParseByteSizeString(writable.Config["size"])
-			if err != nil {
-				return err
-			}
-
-			err = s.StorageEntitySetQuota(storagePoolVolumeTypeCustom, size, nil)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	logger.Infof(`Updated CEPH storage volume "%s"`, s.volume.Name)
-	return nil
-}
-
-func (s *storageCeph) StoragePoolVolumeRename(newName string) error {
-	logger.Infof(`Renaming CEPH storage volume on OSD storage pool "%s" from "%s" to "%s`,
-		s.pool.Name, s.volume.Name, newName)
-
-	_, err := s.StoragePoolVolumeUmount()
-	if err != nil {
-		return err
-	}
-
-	usedBy, err := storagePoolVolumeUsedByInstancesGet(s.s, "default", s.pool.Name, s.volume.Name)
-	if err != nil {
-		return err
-	}
-	if len(usedBy) > 0 {
-		return fmt.Errorf(`RBD storage volume "%s" on CEPH OSD storage pool "%s" is attached to containers`,
-			s.volume.Name, s.pool.Name)
-	}
-
-	// unmap
-	err = cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName,
-		s.volume.Name, storagePoolVolumeTypeNameCustom,
-		s.UserName, true)
-	if err != nil {
-		logger.Errorf(`Failed to unmap RBD storage volume for container "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Unmapped RBD storage volume for container "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-
-	err = cephRBDVolumeRename(s.ClusterName, s.OSDPoolName,
-		storagePoolVolumeTypeNameCustom, s.volume.Name,
-		newName, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to rename RBD storage volume for container "%s" on storage pool "%s": %s`,
-			s.volume.Name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Renamed RBD storage volume for container "%s" on storage pool "%s"`,
-		s.volume.Name, s.pool.Name)
-
-	// map
-	_, err = cephRBDVolumeMap(s.ClusterName, s.OSDPoolName,
-		newName, storagePoolVolumeTypeNameCustom,
-		s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to map RBD storage volume for container "%s" on storage pool "%s": %s`,
-			newName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Mapped RBD storage volume for container "%s" on storage pool "%s"`,
-		newName, s.pool.Name)
-
-	isSnapshot := shared.IsSnapshot(s.volume.Name)
-
-	var oldPath string
-	var newPath string
-
-	if isSnapshot {
-		oldPath = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-		newPath = driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, newName)
-	} else {
-		oldPath = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-		newPath = driver.GetStoragePoolVolumeMountPoint(s.pool.Name, newName)
-	}
-
-	err = os.Rename(oldPath, newPath)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof(`Renamed CEPH storage volume on OSD storage pool "%s" from "%s" to "%s`,
-		s.pool.Name, s.volume.Name, newName)
-
-	return s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, newName,
-		storagePoolVolumeTypeCustom, s.poolID)
-}
-
-func (s *storageCeph) StoragePoolUpdate(writable *api.StoragePoolPut, changedConfig []string) error {
-	logger.Infof(`Updating CEPH storage pool "%s"`, s.pool.Name)
-
-	changeable := changeableStoragePoolProperties["ceph"]
-	unchangeable := []string{}
-	for _, change := range changedConfig {
-		if !shared.StringInSlice(change, changeable) {
-			unchangeable = append(unchangeable, change)
-		}
-	}
-
-	if len(unchangeable) > 0 {
-		return updateStoragePoolError(unchangeable, "ceph")
-	}
-
-	// "rsync.bwlimit" requires no on-disk modifications.
-	// "volume.block.filesystem" requires no on-disk modifications.
-	// "volume.block.mount_options" requires no on-disk modifications.
-	// "volume.size" requires no on-disk modifications.
-
-	logger.Infof(`Updated CEPH storage pool "%s"`, s.pool.Name)
-	return nil
-}
-
-func (s *storageCeph) ContainerStorageReady(container instance.Instance) bool {
-	name := container.Name()
-	logger.Debugf(`Checking if RBD storage volume for container "%s" on storage pool "%s" is ready`, name, s.pool.Name)
-
-	ok := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, project.Prefix(container.Project(), name),
-		storagePoolVolumeTypeNameContainer, s.UserName)
-	if !ok {
-		logger.Debugf(`RBD storage volume for container "%s" on storage pool "%s" does not exist`, name, s.pool.Name)
-		return false
-	}
-
-	logger.Debugf(`RBD storage volume for container "%s" on storage pool "%s" is ready`, name, s.pool.Name)
-	return true
-}
-
-func (s *storageCeph) ContainerCreate(container instance.Instance) error {
-	containerName := container.Name()
-	err := s.doContainerCreate(container.Project(), containerName, container.IsPrivileged())
-	if err != nil {
-		return err
-	}
-
-	err = container.DeferTemplateApply("create")
-	if err != nil {
-		logger.Errorf(`Failed to apply create template for container "%s": %s`, containerName, err)
-		return err
-	}
-	logger.Debugf(`Applied create template for container "%s"`,
-		containerName)
-
-	logger.Debugf(`Created RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-	return nil
-}
-
-func (s *storageCeph) ContainerCreateFromImage(container instance.Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	logger.Debugf(`Creating RBD storage volume for container "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-
-	revert := true
-
-	containerPath := container.Path()
-	containerName := container.Name()
-	containerPoolVolumeMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name,
-		containerName)
-
-	imageStoragePoolLockID := getImageCreateLockID(s.pool.Name, fingerprint)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[imageStoragePoolLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf(`Received value over semaphore. This should not have happened`)
-		}
-	} else {
-		lxdStorageOngoingOperationMap[imageStoragePoolLockID] = make(chan bool)
-		lxdStorageMapLock.Unlock()
-
-		var imgerr error
-		ok := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, s.UserName)
-
-		if ok {
-			_, volume, err := s.s.Cluster.StoragePoolNodeVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
-			if err != nil {
-				return err
-			}
-			if volume.Config["block.filesystem"] != s.getRBDFilesystem() {
-				// The storage pool volume.blockfilesystem property has changed, re-import the image
-				err := s.ImageDelete(fingerprint)
-				if err != nil {
-					return err
-				}
-				ok = false
-			}
-		}
-
-		if !ok {
-			imgerr = s.ImageCreate(fingerprint, tracker)
-		}
-
-		lxdStorageMapLock.Lock()
-		if waitChannel, ok := lxdStorageOngoingOperationMap[imageStoragePoolLockID]; ok {
-			close(waitChannel)
-			delete(lxdStorageOngoingOperationMap, imageStoragePoolLockID)
-		}
-		lxdStorageMapLock.Unlock()
-
-		if imgerr != nil {
-			return imgerr
-		}
-	}
-
-	volumeName := project.Prefix(container.Project(), containerName)
-	err := cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, fingerprint,
-		storagePoolVolumeTypeNameImage, "readonly", s.OSDPoolName,
-		volumeName, storagePoolVolumeTypeNameContainer, s.UserName, s.OSDDataPoolName)
-	if err != nil {
-		logger.Errorf(`Failed to clone new RBD storage volume for container "%s": %s`, containerName, err)
-		return err
-	}
-	logger.Debugf(`Cloned new RBD storage volume for container "%s"`,
-		containerName)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName,
-			containerName, storagePoolVolumeTypeNameContainer,
-			s.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to delete RBD storage volume for container "%s": %s`, containerName, err)
-		}
-	}()
-
-	// Re-generate the UUID
-	err = s.cephRBDGenerateUUID(volumeName, storagePoolVolumeTypeNameContainer)
-	if err != nil {
-		return err
-	}
-
-	// Create the mountpoint
-	privileged := container.IsPrivileged()
-	err = driver.CreateContainerMountpoint(containerPoolVolumeMntPoint,
-		containerPath, privileged)
-	if err != nil {
-		logger.Errorf(`Failed to create mountpoint "%s" for container "%s" for RBD storage volume: %s`, containerPoolVolumeMntPoint, containerName, err)
-		return err
-	}
-	logger.Debugf(`Created mountpoint "%s" for container "%s" for RBD storage volume`, containerPoolVolumeMntPoint, containerName)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := os.Remove(containerPoolVolumeMntPoint)
-		if err != nil {
-			logger.Warnf(`Failed to delete mountpoint "%s" for container "%s" for RBD storage volume: %s`, containerPoolVolumeMntPoint, containerName, err)
-		}
-	}()
-
-	// Apply quota
-	_, imageVol, err := s.s.Cluster.StoragePoolNodeVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
-	if err != nil {
-		return err
-	}
-
-	if s.volume.Config["size"] != "" && imageVol.Config["size"] != s.volume.Config["size"] {
-		size, err := units.ParseByteSizeString(s.volume.Config["size"])
-		if err != nil {
-			return err
-		}
-
-		newSize := s.volume.Config["size"]
-		s.volume.Config["size"] = imageVol.Config["size"]
-		err = s.StorageEntitySetQuota(storagePoolVolumeTypeContainer, size, container)
-		if err != nil {
-			return err
-		}
-		s.volume.Config["size"] = newSize
-	}
-
-	// Shift if needed
-	ourMount, err := s.ContainerMount(container)
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer s.ContainerUmount(container, containerPath)
-	}
-
-	err = container.DeferTemplateApply("create")
-	if err != nil {
-		logger.Errorf(`Failed to apply create template for container "%s": %s`, containerName, err)
-		return err
-	}
-	logger.Debugf(`Applied create template for container "%s"`,
-		containerName)
-
-	logger.Debugf(`Created RBD storage volume for container "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-
-	revert = false
-
-	return nil
-}
-
-func (s *storageCeph) ContainerDelete(container instance.Instance) error {
-	containerName := container.Name()
-	logger.Debugf(`Deleting RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-
-	// umount
-	containerPath := container.Path()
-	containerMntPoint := driver.GetContainerMountPoint(container.Project(), s.pool.Name, containerName)
-	if shared.PathExists(containerMntPoint) {
-		_, err := s.ContainerUmount(container, containerPath)
-		if err != nil {
-			logger.Errorf("Failed to unmount RBD storage volume for container %q on storage pool %q: %v", containerName, s.pool.Name, err)
-			return err
-		}
-	}
-
-	volumeName := project.Prefix(container.Project(), containerName)
-	rbdVolumeExists := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName,
-		volumeName, storagePoolVolumeTypeNameContainer, s.UserName)
-
-	// delete
-	if rbdVolumeExists {
-		ret := cephContainerDelete(s.ClusterName, s.OSDPoolName, volumeName,
-			storagePoolVolumeTypeNameContainer, s.UserName)
-		if ret < 0 {
-			msg := fmt.Sprintf(`Failed to delete RBD storage volume for `+
-				`container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-			logger.Errorf(msg)
-			return fmt.Errorf(msg)
-		}
-	}
-
-	err := deleteContainerMountpoint(containerMntPoint, containerPath,
-		s.GetStorageTypeName())
-	if err != nil {
-		logger.Errorf(`Failed to delete mountpoint %s for RBD storage volume of container "%s" for RBD storage volume on storage pool "%s": %s`, containerMntPoint,
-			containerName, s.pool.Name, err)
-		return err
-	}
-
-	logger.Debugf(`Deleted RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-	return nil
-}
-
-// This function recreates an rbd container including its snapshots. It
-// recreates the dependencies between the container and the snapshots:
-// - create an empty rbd storage volume
-// - for each snapshot dump the contents into the empty storage volume and
-//   after each dump take a snapshot of the rbd storage volume
-// - dump the container contents into the rbd storage volume.
-func (s *storageCeph) doCrossPoolContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool, refresh bool, refreshSnapshots []instance.Instance) error {
-	sourcePool, err := source.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	// setup storage for the source volume
-	srcStorage, err := storagePoolVolumeInit(s.s, source.Project(), sourcePool, source.Name(), storagePoolVolumeTypeContainer)
-	if err != nil {
-		return err
-	}
-
-	ourMount, err := srcStorage.StoragePoolMount()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer srcStorage.StoragePoolUmount()
-	}
-
-	targetPool, err := target.StoragePool()
-	if err != nil {
-		return err
-	}
-
-	var snapshots []instance.Instance
-
-	if refresh {
-		snapshots = refreshSnapshots
-	} else {
-		snapshots, err = source.Snapshots()
-		if err != nil {
-			return err
-		}
-
-		// create the main container
-		err = s.doContainerCreate(target.Project(), target.Name(), target.IsPrivileged())
-		if err != nil {
-			return err
-		}
-	}
-
-	// mount container
-	_, err = s.doContainerMount(target.Project(), target.Name())
-	if err != nil {
-		return err
-	}
-
-	destContainerMntPoint := driver.GetContainerMountPoint(target.Project(), targetPool, target.Name())
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	// Extract container
-	if !containerOnly {
-		for _, snap := range snapshots {
-			srcSnapshotMntPoint := driver.GetSnapshotMountPoint(snap.Project(), sourcePool, snap.Name())
-			_, err = rsync.LocalCopy(srcSnapshotMntPoint, destContainerMntPoint, bwlimit, true)
-			if err != nil {
-				return err
-			}
-
-			// This is costly but we need to ensure that all cached data has
-			// been committed to disk. If we don't then the rbd snapshot of
-			// the underlying filesystem can be inconsistent or - worst case
-			// - empty.
-			unix.Sync()
-
-			msg, fsFreezeErr := shared.TryRunCommand("fsfreeze", "--freeze", destContainerMntPoint)
-			logger.Debugf("Trying to freeze the filesystem: %s: %s", msg, fsFreezeErr)
-
-			// create snapshot
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-			err = s.doContainerSnapshotCreate(target.Project(), fmt.Sprintf("%s/%s", target.Name(), snapOnlyName), target.Name())
-			if fsFreezeErr == nil {
-				msg, fsFreezeErr := shared.TryRunCommand("fsfreeze", "--unfreeze", destContainerMntPoint)
-				logger.Debugf("Trying to unfreeze the filesystem: %s: %s", msg, fsFreezeErr)
-			}
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	srcContainerMntPoint := driver.GetContainerMountPoint(source.Project(), sourcePool, source.Name())
-	_, err = rsync.LocalCopy(srcContainerMntPoint, destContainerMntPoint, bwlimit, true)
-	if err != nil {
-		if !refresh {
-			s.StoragePoolVolumeDelete()
-		}
-
-		logger.Errorf("Failed to rsync into BTRFS storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageCeph) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
-	sourceContainerName := source.Name()
-	logger.Debugf(`Copying RBD container storage %s to %s`, sourceContainerName, target.Name())
-
-	if source.Type() != instancetype.Container {
-		return fmt.Errorf("Source Instance type must be container")
-	}
-
-	if target.Type() != instancetype.Container {
-		return fmt.Errorf("Target Instance type must be container")
-	}
-
-	srcCt := source.(*containerLXC)
-	targetCt := target.(*containerLXC)
-
-	// Handle cross pool copies
-	_, sourcePool, _ := srcCt.Storage().GetContainerPoolInfo()
-	_, targetPool, _ := targetCt.Storage().GetContainerPoolInfo()
-	if sourcePool != targetPool {
-		err := s.doCrossPoolContainerCopy(target, source, containerOnly, false, nil)
-		if err != nil {
-			return err
-		}
-
-		return target.DeferTemplateApply("copy")
-	}
-
-	revert := true
-
-	snapshots, err := source.Snapshots()
-	if err != nil {
-		logger.Errorf(`Failed to retrieve snapshots of container "%s": %s`, sourceContainerName, err)
-		return err
-	}
-	logger.Debugf(`Retrieved snapshots of container "%s"`,
-		sourceContainerName)
-
-	targetContainerName := target.Name()
-	targetContainerMountPoint := driver.GetContainerMountPoint(target.Project(), s.pool.Name, targetContainerName)
-	if containerOnly || len(snapshots) == 0 {
-		if s.pool.Config["ceph.rbd.clone_copy"] != "" &&
-			!shared.IsTrue(s.pool.Config["ceph.rbd.clone_copy"]) {
-			err = s.copyWithoutSnapshotsFull(target, source)
-		} else {
-			err = s.copyWithoutSnapshotsSparse(target, source)
-		}
-		if err != nil {
-			logger.Errorf(`Failed to copy RBD container storage %s to %s`, sourceContainerName, target.Name())
-			return err
-		}
-
-		logger.Debugf(`Copied RBD container storage %s to %s`,
-			sourceContainerName, target.Name())
-		return nil
-	} else {
-		logger.Debugf(`Creating non-sparse copy of RBD storage volume for container "%s" to "%s" including snapshots`,
-			sourceContainerName, targetContainerName)
-
-		// create mountpoint for container
-		targetContainerPath := target.Path()
-		targetContainerMountPoint := driver.GetContainerMountPoint(
-			target.Project(),
-			s.pool.Name,
-			targetContainerName)
-		err = driver.CreateContainerMountpoint(
-			targetContainerMountPoint,
-			targetContainerPath,
-			target.IsPrivileged())
-		if err != nil {
-			logger.Errorf(`Failed to create mountpoint "%s" for RBD storage volume "%s" on storage pool "%s": %s"`, targetContainerMountPoint, s.volume.Name, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Created mountpoint "%s" for RBD storage volume "%s" on storage pool "%s"`, targetContainerMountPoint, s.volume.Name, s.pool.Name)
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err = deleteContainerMountpoint(
-				targetContainerMountPoint,
-				targetContainerPath,
-				"")
-			if err != nil {
-				logger.Warnf(`Failed to delete mountpoint "%s" for RBD storage volume "%s" on storage pool "%s": %s"`, targetContainerMountPoint, s.volume.Name, s.pool.Name, err)
-			}
-		}()
-
-		// create empty dummy volume
-		err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName,
-			project.Prefix(target.Project(), targetContainerName), storagePoolVolumeTypeNameContainer,
-			"0", s.UserName, s.OSDDataPoolName)
-		if err != nil {
-			logger.Errorf(`Failed to create RBD storage volume "%s" on storage pool "%s": %s`, targetContainerName, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Created RBD storage volume "%s" on storage pool "%s"`,
-			targetContainerName, s.pool.Name)
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err := cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName,
-				project.Prefix(target.Project(), targetContainerName),
-				storagePoolVolumeTypeNameContainer, s.UserName)
-			if err != nil {
-				logger.Warnf(`Failed to delete RBD storage volume "%s" on storage pool "%s": %s`, targetContainerName, s.pool.Name, err)
-			}
-		}()
-
-		// receive over the dummy volume we created above
-		targetVolumeName := fmt.Sprintf(
-			"%s/container_%s",
-			s.OSDPoolName,
-			project.Prefix(target.Project(), targetContainerName))
-
-		lastSnap := ""
-		for i, snap := range snapshots {
-			prev := ""
-			if i > 0 {
-				_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapshots[i-1].Name())
-				prev = fmt.Sprintf("snapshot_%s", snapOnlyName)
-			}
-
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
-			lastSnap = fmt.Sprintf("snapshot_%s", snapOnlyName)
-			sourceVolumeName := fmt.Sprintf(
-				"%s/container_%s at snapshot_%s",
-				s.OSDPoolName,
-				project.Prefix(source.Project(), sourceContainerName),
-				snapOnlyName)
-
-			err = s.copyWithSnapshots(
-				sourceVolumeName,
-				targetVolumeName,
-				prev)
-			if err != nil {
-				logger.Errorf(`Failed to copy RBD container storage %s to %s`, sourceVolumeName,
-					targetVolumeName)
-				return err
-			}
-			logger.Debugf(`Copied RBD container storage %s to %s`,
-				sourceVolumeName, targetVolumeName)
-
-			defer func() {
-				if !revert {
-					return
-				}
-
-				err := cephRBDSnapshotDelete(s.ClusterName,
-					s.OSDPoolName, project.Prefix(target.Project(), targetContainerName),
-					storagePoolVolumeTypeNameContainer,
-					snapOnlyName, s.UserName)
-				if err != nil {
-					logger.Warnf(`Failed to delete RBD container storage for snapshot "%s" of container "%s"`, snapOnlyName, targetContainerName)
-				}
-			}()
-
-			// create snapshot mountpoint
-			newTargetName := fmt.Sprintf("%s/%s", targetContainerName, snapOnlyName)
-			containersPath := driver.GetSnapshotMountPoint(
-				target.Project(),
-				s.pool.Name,
-				newTargetName)
-
-			snapshotMntPointSymlinkTarget := shared.VarPath(
-				"storage-pools",
-				s.pool.Name,
-				"containers-snapshots",
-				project.Prefix(target.Project(), targetContainerName))
-
-			snapshotMntPointSymlink := shared.VarPath(
-				"snapshots",
-				project.Prefix(target.Project(), targetContainerName))
-
-			err := driver.CreateSnapshotMountpoint(
-				containersPath,
-				snapshotMntPointSymlinkTarget,
-				snapshotMntPointSymlink)
-			if err != nil {
-				logger.Errorf(`Failed to create mountpoint "%s", snapshot symlink target "%s", snapshot mountpoint symlink"%s" for RBD storage volume "%s" on storage pool "%s": %s`, containersPath, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink, s.volume.Name, s.pool.Name, err)
-				return err
-			}
-			logger.Debugf(`Created mountpoint "%s", snapshot symlink target "%s", snapshot mountpoint symlink"%s" for RBD storage volume "%s" on storage pool "%s"`, containersPath, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink, s.volume.Name, s.pool.Name)
-
-			defer func() {
-				if !revert {
-					return
-				}
-
-				err = deleteSnapshotMountpoint(
-					containersPath,
-					snapshotMntPointSymlinkTarget,
-					snapshotMntPointSymlink)
-				if err != nil {
-					logger.Warnf(`Failed to delete mountpoint "%s", snapshot symlink target "%s", snapshot mountpoint symlink "%s" for RBD storage volume "%s" on storage pool "%s": %s`, containersPath, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink, s.volume.Name, s.pool.Name, err)
-				}
-			}()
-		}
-
-		// copy snapshot
-		sourceVolumeName := fmt.Sprintf(
-			"%s/container_%s",
-			s.OSDPoolName,
-			project.Prefix(source.Project(), sourceContainerName))
-		err = s.copyWithSnapshots(
-			sourceVolumeName,
-			targetVolumeName,
-			lastSnap)
-		if err != nil {
-			logger.Errorf(`Failed to copy RBD container storage %s to %s`, sourceVolumeName, targetVolumeName)
-			return err
-		}
-		logger.Debugf(`Copied RBD container storage %s to %s`, sourceVolumeName, targetVolumeName)
-
-		// Re-generate the UUID
-		err := s.cephRBDGenerateUUID(project.Prefix(target.Project(), targetContainerName), storagePoolVolumeTypeNameContainer)
-		if err != nil {
-			return err
-		}
-
-		logger.Debugf(`Created non-sparse copy of RBD storage volume for container "%s" to "%s" including snapshots`,
-			sourceContainerName, targetContainerName)
-	}
-
-	// Mount the container
-	ourMount, err := s.ContainerMount(target)
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer s.ContainerUmount(target, targetContainerMountPoint)
-	}
-
-	err = target.DeferTemplateApply("copy")
-	if err != nil {
-		logger.Errorf(`Failed to apply copy template for container "%s": %s`, target.Name(), err)
-		return err
-	}
-	logger.Debugf(`Applied copy template for container "%s"`, target.Name())
-
-	logger.Debugf(`Copied RBD container storage %s to %s`, sourceContainerName, target.Name())
-
-	revert = false
-	return nil
-}
-
-func (s *storageCeph) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
-	logger.Debugf(`Refreshing RBD container storage for %s from %s`, target.Name(), source.Name())
-
-	return s.doCrossPoolContainerCopy(target, source, len(snapshots) == 0, true, snapshots)
-}
-
-func (s *storageCeph) ContainerMount(c instance.Instance) (bool, error) {
-	logger.Debugf("Mounting RBD storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	ourMount, err := s.doContainerMount(c.Project(), c.Name())
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Mounted RBD storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourMount, nil
-}
-
-func (s *storageCeph) ContainerUmount(c instance.Instance, path string) (bool, error) {
-	logger.Debugf("Unmounting RBD storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	name := c.Name()
-
-	containerMntPoint := driver.GetContainerMountPoint(c.Project(), s.pool.Name, name)
-	if shared.IsSnapshot(name) {
-		containerMntPoint = driver.GetSnapshotMountPoint(c.Project(), s.pool.Name, name)
-	}
-
-	containerUmountLockID := getContainerUmountLockID(s.pool.Name, project.Prefix(c.Project(), name))
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerUmountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in unmounting the storage volume.
-		logger.Debugf("RBD storage volume for container \"%s\" on storage pool \"%s\" appears to be already unmounted", s.volume.Name, s.pool.Name)
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[containerUmountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var mounterr error
-	ourUmount := false
-	if shared.IsMountPoint(containerMntPoint) {
-		mounterr = storageDrivers.TryUnmount(containerMntPoint, 0)
-		ourUmount = true
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerUmountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, containerUmountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if mounterr != nil {
-		logger.Errorf("Failed to unmount RBD storage volume for container \"%s\": %s", s.volume.Name, mounterr)
-		return false, mounterr
-	}
-
-	logger.Debugf("Unmounted RBD storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return ourUmount, nil
-}
-
-func (s *storageCeph) ContainerRename(c instance.Instance, newName string) error {
-	oldName := c.Name()
-	containerPath := c.Path()
-
-	revert := true
-
-	logger.Debugf(`Renaming RBD storage volume for container "%s" from "%s" to "%s"`, oldName, oldName, newName)
-
-	// unmount
-	_, err := s.ContainerUmount(c, containerPath)
-	if err != nil {
-		return err
-	}
-
-	// unmap
-	err = cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, oldName,
-		storagePoolVolumeTypeNameContainer, s.UserName, true)
-	if err != nil {
-		logger.Errorf(`Failed to unmap RBD storage volume for container "%s" on storage pool "%s": %s`, oldName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Unmapped RBD storage volume for container "%s" on storage pool "%s"`, oldName, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		_, err := cephRBDVolumeMap(s.ClusterName, s.OSDPoolName,
-			oldName, storagePoolVolumeTypeNameContainer, s.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to Map RBD storage volume for container "%s": %s`, oldName, err)
-		}
-	}()
-
-	err = cephRBDVolumeRename(s.ClusterName, s.OSDPoolName,
-		storagePoolVolumeTypeNameContainer, oldName, newName, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to rename RBD storage volume for container "%s" on storage pool "%s": %s`, oldName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Renamed RBD storage volume for container "%s" on storage pool "%s"`, oldName, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err = cephRBDVolumeRename(s.ClusterName, s.OSDPoolName,
-			storagePoolVolumeTypeNameContainer, newName, oldName,
-			s.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to rename RBD storage volume for container "%s" on storage pool "%s": %s`, newName, s.pool.Name, err)
-		}
-	}()
-
-	// map
-	_, err = cephRBDVolumeMap(s.ClusterName, s.OSDPoolName, newName,
-		storagePoolVolumeTypeNameContainer, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to map RBD storage volume for container "%s" on storage pool "%s": %s`, newName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Mapped RBD storage volume for container "%s" on storage pool "%s"`, newName, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, newName,
-			storagePoolVolumeTypeNameContainer, s.UserName, true)
-		if err != nil {
-			logger.Warnf(`Failed to unmap RBD storage volume for container "%s": %s`, newName, err)
-		}
-	}()
-
-	// Create new mountpoint on the storage pool.
-	oldContainerMntPoint := driver.GetContainerMountPoint(c.Project(), s.pool.Name, oldName)
-	oldContainerMntPointSymlink := containerPath
-	newContainerMntPoint := driver.GetContainerMountPoint(c.Project(), s.pool.Name, newName)
-	newContainerMntPointSymlink := shared.VarPath("containers", project.Prefix(c.Project(), newName))
-	err = renameContainerMountpoint(
-		oldContainerMntPoint,
-		oldContainerMntPointSymlink,
-		newContainerMntPoint,
-		newContainerMntPointSymlink)
-	if err != nil {
-		return err
-	}
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		renameContainerMountpoint(newContainerMntPoint,
-			newContainerMntPointSymlink, oldContainerMntPoint,
-			oldContainerMntPointSymlink)
-	}()
-
-	// Rename the snapshot mountpoint on the storage pool.
-	oldSnapshotMntPoint := driver.GetSnapshotMountPoint(c.Project(), s.pool.Name, oldName)
-	newSnapshotMntPoint := driver.GetSnapshotMountPoint(c.Project(), s.pool.Name, newName)
-	if shared.PathExists(oldSnapshotMntPoint) {
-		err := os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
-		if err != nil {
-			return err
-		}
-	}
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		os.Rename(newSnapshotMntPoint, oldSnapshotMntPoint)
-	}()
-
-	// Remove old symlink.
-	oldSnapshotPath := shared.VarPath("snapshots", project.Prefix(c.Project(), oldName))
-	if shared.PathExists(oldSnapshotPath) {
-		err := os.Remove(oldSnapshotPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		os.Symlink(oldSnapshotMntPoint, oldSnapshotPath)
-	}()
-
-	// Create new symlink.
-	newSnapshotPath := shared.VarPath("snapshots", project.Prefix(c.Project(), newName))
-	if shared.PathExists(newSnapshotPath) {
-		err := os.Symlink(newSnapshotMntPoint, newSnapshotPath)
-		if err != nil {
-			return err
-		}
-	}
-
-	logger.Debugf(`Renamed RBD storage volume for container "%s" from "%s" to "%s"`, oldName, oldName, newName)
-
-	revert = false
-
-	return nil
-}
-
-func (s *storageCeph) ContainerRestore(target instance.Instance, source instance.Instance) error {
-	sourceName := source.Name()
-	targetName := target.Name()
-
-	logger.Debugf(`Restoring RBD storage volume for container "%s" from %s to %s`, targetName, sourceName, targetName)
-
-	ourStop, err := source.StorageStop()
-	if err != nil {
-		return err
-	}
-	if ourStop {
-		defer source.StorageStart()
-	}
-
-	ourStop, err = target.StorageStop()
-	if err != nil {
-		return err
-	}
-	if ourStop {
-		defer target.StorageStart()
-	}
-
-	sourceContainerOnlyName, sourceSnapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(sourceName)
-	prefixedSourceSnapOnlyName := fmt.Sprintf("snapshot_%s", sourceSnapshotOnlyName)
-	err = cephRBDVolumeRestore(s.ClusterName, s.OSDPoolName,
-		project.Prefix(source.Project(), sourceContainerOnlyName), storagePoolVolumeTypeNameContainer,
-		prefixedSourceSnapOnlyName, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to restore RBD storage volume for container "%s" from "%s": %s`, targetName, sourceName, err)
-		return err
-	}
-
-	// Re-generate the UUID
-	err = s.cephRBDGenerateUUID(project.Prefix(target.Project(), target.Name()), storagePoolVolumeTypeNameContainer)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf(`Restored RBD storage volume for container "%s" from %s to %s`, targetName, sourceName, targetName)
-	return nil
-}
-
-func (s *storageCeph) ContainerGetUsage(container instance.Instance) (int64, error) {
-	return -1, fmt.Errorf("RBD quotas are currently not supported")
-}
-
-func (s *storageCeph) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
-	containerMntPoint := driver.GetContainerMountPoint(sourceContainer.Project(), s.pool.Name, sourceContainer.Name())
-	if shared.IsMountPoint(containerMntPoint) {
-		// This is costly but we need to ensure that all cached data has
-		// been committed to disk. If we don't then the rbd snapshot of
-		// the underlying filesystem can be inconsistent or - worst case
-		// - empty.
-		unix.Sync()
-
-		msg, fsFreezeErr := shared.TryRunCommand("fsfreeze", "--freeze", containerMntPoint)
-		logger.Debugf("Trying to freeze the filesystem: %s: %s", msg, fsFreezeErr)
-		if fsFreezeErr == nil {
-			defer shared.TryRunCommand("fsfreeze", "--unfreeze", containerMntPoint)
-		}
-	}
-
-	return s.doContainerSnapshotCreate(sourceContainer.Project(), snapshotContainer.Name(), sourceContainer.Name())
-}
-
-func (s *storageCeph) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
-	logger.Debugf(`Deleting RBD storage volume for snapshot "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-
-	snapshotContainerName := snapshotContainer.Name()
-	sourceContainerName, sourceContainerSnapOnlyName, _ :=
-		shared.InstanceGetParentAndSnapshotName(snapshotContainerName)
-	snapshotName := fmt.Sprintf("snapshot_%s", sourceContainerSnapOnlyName)
-
-	rbdVolumeExists := cephRBDSnapshotExists(s.ClusterName, s.OSDPoolName,
-		project.Prefix(snapshotContainer.Project(), sourceContainerName), storagePoolVolumeTypeNameContainer,
-		snapshotName, s.UserName)
-
-	if rbdVolumeExists {
-		ret := cephContainerSnapshotDelete(s.ClusterName, s.OSDPoolName,
-			project.Prefix(snapshotContainer.Project(), sourceContainerName),
-			storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
-		if ret < 0 {
-			msg := fmt.Sprintf(`Failed to delete RBD storage volume for `+
-				`snapshot "%s" on storage pool "%s"`,
-				snapshotContainerName, s.pool.Name)
-			logger.Errorf(msg)
-			return fmt.Errorf(msg)
-		}
-	}
-
-	snapshotContainerMntPoint := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name,
-		snapshotContainerName)
-	if shared.PathExists(snapshotContainerMntPoint) {
-		err := os.RemoveAll(snapshotContainerMntPoint)
-		if err != nil {
-			logger.Errorf(`Failed to delete mountpoint "%s" of RBD snapshot "%s" of container "%s" on storage pool "%s": %s`, snapshotContainerMntPoint, sourceContainerSnapOnlyName, sourceContainerName, s.OSDPoolName, err)
-			return err
-		}
-		logger.Debugf(`Deleted mountpoint "%s" of RBD snapshot "%s" of container "%s" on storage pool "%s"`, snapshotContainerMntPoint, sourceContainerSnapOnlyName, sourceContainerName, s.OSDPoolName)
-	}
-
-	// check if snapshot directory is empty
-	snapshotContainerPath := driver.GetSnapshotMountPoint(snapshotContainer.Project(), s.pool.Name,
-		sourceContainerName)
-	empty, _ := shared.PathIsEmpty(snapshotContainerPath)
-	if empty == true {
-		// remove snapshot directory for container
-		err := os.Remove(snapshotContainerPath)
-		if err != nil {
-			logger.Errorf(`Failed to delete snapshot directory "%s" of RBD snapshot "%s" of container "%s" on storage pool "%s": %s`, snapshotContainerPath, sourceContainerSnapOnlyName, sourceContainerName, s.OSDPoolName, err)
-			return err
-		}
-		logger.Debugf(`Deleted snapshot directory  "%s" of RBD snapshot "%s" of container "%s" on storage pool "%s"`, snapshotContainerPath, sourceContainerSnapOnlyName, sourceContainerName, s.OSDPoolName)
-
-		// remove the snapshot symlink if possible
-		snapshotSymlink := shared.VarPath("snapshots",
-			project.Prefix(snapshotContainer.Project(), sourceContainerName))
-		if shared.PathExists(snapshotSymlink) {
-			err := os.Remove(snapshotSymlink)
-			if err != nil {
-				logger.Errorf(`Failed to delete snapshot symlink "%s" of RBD snapshot "%s" of container "%s" on storage pool "%s": %s`, snapshotSymlink, sourceContainerSnapOnlyName, sourceContainerName, s.OSDPoolName, err)
-				return err
-			}
-			logger.Debugf(`Deleted snapshot symlink "%s" of RBD snapshot "%s" of container "%s" on storage pool "%s"`, snapshotSymlink, sourceContainerSnapOnlyName, sourceContainerName, s.OSDPoolName)
-		}
-	}
-
-	logger.Debugf(`Deleted RBD storage volume for snapshot "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageCeph) ContainerSnapshotRename(c instance.Instance, newName string) error {
-	oldName := c.Name()
-	logger.Debugf(`Renaming RBD storage volume for snapshot "%s" from "%s" to "%s"`, oldName, oldName, newName)
-
-	revert := true
-
-	containerOnlyName, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(oldName)
-	containerOnlyName = project.Prefix(c.Project(), containerOnlyName)
-	oldSnapOnlyName := fmt.Sprintf("snapshot_%s", snapOnlyName)
-	_, newSnapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(newName)
-	newSnapOnlyName = fmt.Sprintf("snapshot_%s", newSnapOnlyName)
-	err := cephRBDVolumeSnapshotRename(s.ClusterName, s.OSDPoolName,
-		containerOnlyName, storagePoolVolumeTypeNameContainer, oldSnapOnlyName,
-		newSnapOnlyName, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to rename RBD storage volume for snapshot "%s" from "%s" to "%s": %s`, oldName, oldName, newName, err)
-		return err
-	}
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := cephRBDVolumeSnapshotRename(s.ClusterName, s.OSDPoolName,
-			containerOnlyName, storagePoolVolumeTypeNameContainer,
-			newSnapOnlyName, oldSnapOnlyName, s.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to rename RBD storage volume for container "%s" on storage pool "%s": %s`, oldName, s.pool.Name, err)
-		}
-	}()
-
-	oldSnapshotMntPoint := driver.GetSnapshotMountPoint(c.Project(), s.pool.Name, oldName)
-	newSnapshotMntPoint := driver.GetSnapshotMountPoint(c.Project(), s.pool.Name, newName)
-	err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
-	if err != nil {
-		logger.Errorf(`Failed to rename mountpoint for RBD storage volume for snapshot "%s" from "%s" to "%s": %s`, oldName, oldSnapshotMntPoint, newSnapshotMntPoint, err)
-		return err
-	}
-	logger.Debugf(`Renamed mountpoint for RBD storage volume for snapshot "%s" from "%s" to "%s"`, oldName, oldSnapshotMntPoint, newSnapshotMntPoint)
-
-	logger.Debugf(`Renamed RBD storage volume for snapshot "%s" from "%s" to "%s"`, oldName, oldName, newName)
-
-	revert = false
-
-	return nil
-}
-
-func (s *storageCeph) ContainerSnapshotStart(c instance.Instance) (bool, error) {
-	containerName := c.Name()
-	logger.Debugf(`Initializing RBD storage volume for snapshot "%s" on storage pool "%s"`, containerName, s.pool.Name)
-
-	revert := true
-
-	containerOnlyName, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(containerName)
-	containerOnlyName = project.Prefix(c.Project(), containerOnlyName)
-
-	// protect
-	prefixedSnapOnlyName := fmt.Sprintf("snapshot_%s", snapOnlyName)
-	err := cephRBDSnapshotProtect(s.ClusterName, s.OSDPoolName,
-		containerOnlyName, storagePoolVolumeTypeNameContainer,
-		prefixedSnapOnlyName, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to protect snapshot of RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-		return false, err
-	}
-	logger.Debugf(`Protected snapshot of RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := cephRBDSnapshotUnprotect(s.ClusterName, s.OSDPoolName,
-			containerOnlyName, storagePoolVolumeTypeNameContainer,
-			prefixedSnapOnlyName, s.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to unprotect snapshot of RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-		}
-	}()
-
-	cloneName := fmt.Sprintf("%s_%s_start_clone", containerOnlyName, snapOnlyName)
-	// clone
-	err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName,
-		containerOnlyName, storagePoolVolumeTypeNameContainer,
-		prefixedSnapOnlyName, s.OSDPoolName, cloneName, "snapshots",
-		s.UserName, s.OSDDataPoolName)
-	if err != nil {
-		logger.Errorf(`Failed to create clone of RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-		return false, err
-	}
-	logger.Debugf(`Created clone of RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		// delete
-		err = cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName,
-			cloneName, "snapshots", s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to delete clone of RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-		}
-	}()
-
-	// Re-generate the UUID
-	err = s.cephRBDGenerateUUID(cloneName, "snapshots")
-	if err != nil {
-		return false, err
-	}
-
-	// map
-	RBDDevPath, err := cephRBDVolumeMap(s.ClusterName, s.OSDPoolName,
-		cloneName, "snapshots", s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to map RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-		return false, err
-	}
-	logger.Debugf(`Mapped RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName,
-			cloneName, "snapshots", s.UserName, true)
-		if err != nil {
-			logger.Warnf(`Failed to unmap RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-		}
-	}()
-
-	containerMntPoint := driver.GetSnapshotMountPoint(c.Project(), s.pool.Name, containerName)
-	RBDFilesystem := s.getRBDFilesystem()
-	mountFlags, mountOptions := resolveMountOptions(s.getRBDMountOptions())
-	if RBDFilesystem == "xfs" {
-		idx := strings.Index(mountOptions, "nouuid")
-		if idx < 0 {
-			mountOptions += ",nouuid"
-		}
-	}
-
-	err = storageDrivers.TryMount(
-		RBDDevPath,
-		containerMntPoint,
-		RBDFilesystem,
-		mountFlags,
-		mountOptions)
-	if err != nil {
-		logger.Errorf("Failed to mount RBD device %s onto %s: %s",
-			RBDDevPath, containerMntPoint, err)
-		return false, err
-	}
-	logger.Debugf("Mounted RBD device %s onto %s", RBDDevPath,
-		containerMntPoint)
-
-	logger.Debugf(`Initialized RBD storage volume for snapshot "%s" on storage pool "%s"`, containerName, s.pool.Name)
-
-	revert = false
-
-	return true, nil
-}
-
-func (s *storageCeph) ContainerSnapshotStop(c instance.Instance) (bool, error) {
-	logger.Debugf(`Stopping RBD storage volume for snapshot "%s" on storage pool "%s"`, c.Name(), s.pool.Name)
-
-	containerName := c.Name()
-	containerMntPoint := driver.GetSnapshotMountPoint(c.Project(), s.pool.Name, containerName)
-
-	// Check if already unmounted
-	if !shared.IsMountPoint(containerMntPoint) {
-		return false, nil
-	}
-
-	// Unmount
-	err := storageDrivers.TryUnmount(containerMntPoint, unix.MNT_DETACH)
-	if err != nil {
-		logger.Errorf("Failed to unmount %s: %s", containerMntPoint, err)
-		return false, err
-	}
-
-	logger.Debugf("Unmounted %s", containerMntPoint)
-
-	containerOnlyName, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(containerName)
-	containerOnlyName = project.Prefix(c.Project(), containerOnlyName)
-	cloneName := fmt.Sprintf("%s_%s_start_clone", containerOnlyName, snapOnlyName)
-
-	// Unmap the RBD volume
-	err = cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, cloneName, "snapshots", s.UserName, true)
-	if err != nil {
-		logger.Warnf(`Failed to unmap RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-	} else {
-		logger.Debugf(`Unmapped RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-	}
-
-	rbdVolumeExists := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, cloneName, "snapshots", s.UserName)
-	if rbdVolumeExists {
-		// Delete the temporary RBD volume
-		err = cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName, cloneName, "snapshots", s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to delete clone of RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
-			return false, err
-		}
-		logger.Debugf(`Deleted clone of RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
-	}
-
-	logger.Debugf(`Stopped RBD storage volume for snapshot "%s" on storage pool "%s"`, containerName, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageCeph) ContainerSnapshotCreateEmpty(c instance.Instance) error {
-	logger.Debugf(`Creating empty RBD storage volume for snapshot "%s" on storage pool "%s" (noop)`, c.Name(), s.pool.Name)
-
-	logger.Debugf(`Created empty RBD storage volume for snapshot "%s" on storage pool "%s" (noop)`, c.Name(), s.pool.Name)
-	return nil
-}
-
-func (s *storageCeph) ContainerBackupCreate(path string, backup backup.Backup, source instance.Instance) error {
-	// Generate the actual backup
-	if !backup.InstanceOnly() {
-		snapshots, err := source.Snapshots()
-		if err != nil {
-			return err
-		}
-
-		for _, snap := range snapshots {
-			err := s.cephRBDVolumeBackupCreate(path, backup, snap)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	return s.cephRBDVolumeBackupCreate(path, backup, source)
-}
-
-// This function recreates an rbd container including its snapshots. It
-// recreates the dependencies between the container and the snapshots:
-// - create an empty rbd storage volume
-// - for each snapshot dump the contents into the empty storage volume and
-//   after each dump take a snapshot of the rbd storage volume
-// - dump the container contents into the rbd storage volume.
-func (s *storageCeph) ContainerBackupLoad(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	// create the main container
-	err := s.doContainerCreate(info.Project, info.Name, info.Privileged)
-	if err != nil {
-		return err
-	}
-
-	// mount container
-	_, err = s.doContainerMount(info.Project, info.Name)
-	if err != nil {
-		return err
-	}
-
-	containerMntPoint := driver.GetContainerMountPoint(info.Project, s.pool.Name, info.Name)
-	// Extract container
-	for _, snap := range info.Snapshots {
-		cur := fmt.Sprintf("backup/snapshots/%s", snap)
-
-		// Prepare tar arguments
-		args := append(tarArgs, []string{
-			"-",
-			"--recursive-unlink",
-			"--strip-components=3",
-			"--xattrs-include=*",
-			"-C", containerMntPoint, cur,
-		}...)
-
-		// Extract snapshots
-		data.Seek(0, 0)
-		err = shared.RunCommandWithFds(data, nil, "tar", args...)
-		if err != nil {
-			logger.Errorf("Failed to untar \"%s\" into \"%s\": %s", cur, containerMntPoint, err)
-			return err
-		}
-
-		// This is costly but we need to ensure that all cached data has
-		// been committed to disk. If we don't then the rbd snapshot of
-		// the underlying filesystem can be inconsistent or - worst case
-		// - empty.
-		unix.Sync()
-
-		msg, fsFreezeErr := shared.TryRunCommand("fsfreeze", "--freeze", containerMntPoint)
-		logger.Debugf("Trying to freeze the filesystem: %s: %s", msg, fsFreezeErr)
-
-		// create snapshot
-		err = s.doContainerSnapshotCreate(info.Project, fmt.Sprintf("%s/%s", info.Name, snap), info.Name)
-		if fsFreezeErr == nil {
-			msg, fsFreezeErr := shared.TryRunCommand("fsfreeze", "--unfreeze", containerMntPoint)
-			logger.Debugf("Trying to unfreeze the filesystem: %s: %s", msg, fsFreezeErr)
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	// Prepare tar arguments
-	args := append(tarArgs, []string{
-		"-",
-		"--strip-components=2",
-		"--xattrs-include=*",
-		"-C", containerMntPoint, "backup/container",
-	}...)
-
-	// Extract container
-	data.Seek(0, 0)
-	err = shared.RunCommandWithFds(data, nil, "tar", args...)
-	if err != nil {
-		logger.Errorf("Failed to untar \"backup/container\" into \"%s\": %s", containerMntPoint, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageCeph) ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	logger.Debugf(`Creating RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-	revert := true
-
-	// create image mountpoint
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if !shared.PathExists(imageMntPoint) {
-		err := os.MkdirAll(imageMntPoint, 0700)
-		if err != nil {
-			logger.Errorf(`Failed to create mountpoint "%s" for RBD storage volume for image "%s" on storage pool "%s": %s`, imageMntPoint, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Created mountpoint "%s" for RBD storage volume for image "%s" on storage pool "%s"`, imageMntPoint, fingerprint, s.pool.Name)
-	}
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := os.Remove(imageMntPoint)
-		if err != nil {
-			logger.Warnf(`Failed to delete mountpoint "%s" for RBD storage volume for image "%s" on storage pool "%s": %s`, imageMntPoint, fingerprint, s.pool.Name, err)
-		}
-	}()
-
-	prefixedType := fmt.Sprintf("zombie_%s_%s",
-		storagePoolVolumeTypeNameImage,
-		s.volume.Config["block.filesystem"])
-	ok := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, fingerprint,
-		prefixedType, s.UserName)
-	if !ok {
-		logger.Debugf(`RBD storage volume for image "%s" on storage pool "%s" does not exist`, fingerprint, s.pool.Name)
-
-		// get size
-		RBDSize, err := s.getRBDSize()
-		if err != nil {
-			logger.Errorf(`Failed to retrieve size of RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Retrieve size "%s" of RBD storage volume for image "%s" on storage pool "%s"`, RBDSize, fingerprint, s.pool.Name)
-
-		// create volume
-		err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, RBDSize,
-			s.UserName, s.OSDDataPoolName)
-		if err != nil {
-			logger.Errorf(`Failed to create RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Created RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err := cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName,
-				fingerprint, storagePoolVolumeTypeNameImage,
-				s.UserName)
-			if err != nil {
-				logger.Warnf(`Failed to delete RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			}
-		}()
-
-		RBDDevPath, err := cephRBDVolumeMap(s.ClusterName,
-			s.OSDPoolName, fingerprint,
-			storagePoolVolumeTypeNameImage, s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to map RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Mapped RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err := cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName,
-				fingerprint, storagePoolVolumeTypeNameImage,
-				s.UserName, true)
-			if err != nil {
-				logger.Warnf(`Failed to unmap RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			}
-		}()
-
-		// get filesystem
-		RBDFilesystem := s.getRBDFilesystem()
-		output, err := makeFSType(RBDDevPath, RBDFilesystem, nil)
-		if err != nil {
-			logger.Errorf(`Failed to create filesystem "%s" for RBD storage volume for image "%s" on storage pool "%s": %v (%s)`, RBDFilesystem, fingerprint, s.pool.Name, err, output)
-			return err
-		}
-		logger.Debugf(`Created filesystem "%s" for RBD storage volume for image "%s" on storage pool "%s"`, RBDFilesystem, fingerprint, s.pool.Name)
-
-		// mount image
-		_, err = s.ImageMount(fingerprint)
-		if err != nil {
-			return err
-		}
-
-		// rsync contents into image
-		imagePath := shared.VarPath("images", fingerprint)
-		err = driver.ImageUnpack(imagePath, imageMntPoint, "", true, s.s.OS.RunningInUserNS, nil)
-		if err != nil {
-			logger.Errorf(`Failed to unpack image for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-
-			// umount image
-			s.ImageUmount(fingerprint)
-			return err
-		}
-		logger.Debugf(`Unpacked image for RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		// umount image
-		s.ImageUmount(fingerprint)
-
-		// unmap
-		err = cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, s.UserName,
-			true)
-		if err != nil {
-			logger.Errorf(`Failed to unmap RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Unmapped RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		// make snapshot of volume
-		err = cephRBDSnapshotCreate(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, "readonly",
-			s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to create snapshot for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Created snapshot for RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err := cephRBDSnapshotDelete(s.ClusterName,
-				s.OSDPoolName, fingerprint,
-				storagePoolVolumeTypeNameImage, "readonly",
-				s.UserName)
-			if err != nil {
-				logger.Warnf(`Failed to delete snapshot for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			}
-		}()
-
-		// protect volume so we can create clones of it
-		err = cephRBDSnapshotProtect(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, "readonly",
-			s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to protect snapshot for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Protected snapshot for RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err := cephRBDSnapshotUnprotect(s.ClusterName,
-				s.OSDPoolName, fingerprint,
-				storagePoolVolumeTypeNameImage, "readonly",
-				s.UserName)
-			if err != nil {
-				logger.Warnf(`Failed to unprotect snapshot for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			}
-		}()
-	} else {
-		logger.Debugf(`RBD storage volume for image "%s" on storage pool "%s" does exist`, fingerprint, s.pool.Name)
-
-		// unmark deleted
-		err := cephRBDVolumeUnmarkDeleted(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, s.UserName,
-			s.volume.Config["block.filesystem"], "")
-		if err != nil {
-			logger.Errorf(`Failed to unmark RBD storage volume for image "%s" on storage pool "%s" as zombie: %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Unmarked RBD storage volume for image "%s" on storage pool "%s" as zombie`, fingerprint, s.pool.Name)
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err := cephRBDVolumeMarkDeleted(s.ClusterName,
-				s.OSDPoolName, storagePoolVolumeTypeNameImage,
-				fingerprint, fingerprint, s.UserName,
-				s.volume.Config["block.filesystem"])
-			if err != nil {
-				logger.Warnf(`Failed to mark RBD storage volume for image "%s" on storage pool "%s" as zombie: %s`, fingerprint, s.pool.Name, err)
-			}
-		}()
-	}
-
-	err := s.createImageDbPoolVolume(fingerprint)
-	if err != nil {
-		logger.Errorf(`Failed to create database entry for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Createdd database entry for RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-	logger.Debugf(`Created RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-	revert = false
-
-	return nil
-}
-
-func (s *storageCeph) ImageDelete(fingerprint string) error {
-	logger.Debugf(`Deleting RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-	// try to umount but don't fail
-	s.ImageUmount(fingerprint)
-
-	// check if image has dependent snapshots
-	_, err := cephRBDSnapshotListClones(s.ClusterName, s.OSDPoolName,
-		fingerprint, storagePoolVolumeTypeNameImage, "readonly",
-		s.UserName)
-	if err != nil {
-		if err != db.ErrNoSuchObject {
-			logger.Errorf(`Failed to list clones of RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Retrieved no clones of RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		// unprotect snapshot
-		err = cephRBDSnapshotUnprotect(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, "readonly",
-			s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to unprotect snapshot for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Unprotected snapshot for RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		// delete snapshots
-		err = cephRBDSnapshotsPurge(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to delete snapshot for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Deleted snapshot for RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		// unmap
-		err = cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, s.UserName,
-			true)
-		if err != nil {
-			logger.Errorf(`Failed to unmap RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Unmapped RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		// delete volume
-		err = cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to delete RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Deleted RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-	} else {
-		// unmap
-		err = cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName,
-			fingerprint, storagePoolVolumeTypeNameImage, s.UserName,
-			true)
-		if err != nil {
-			logger.Errorf(`Failed to unmap RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Unmapped RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-		// mark deleted
-		err := cephRBDVolumeMarkDeleted(s.ClusterName, s.OSDPoolName,
-			storagePoolVolumeTypeNameImage, fingerprint,
-			fingerprint, s.UserName,
-			s.volume.Config["block.filesystem"])
-		if err != nil {
-			logger.Errorf(`Failed to mark RBD storage volume for image "%s" on storage pool "%s" as zombie: %s`, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Marked RBD storage volume for image "%s" on storage pool "%s" as zombie`, fingerprint, s.pool.Name)
-	}
-
-	err = s.deleteImageDbPoolVolume(fingerprint)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Deleted database entry for RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if shared.PathExists(imageMntPoint) {
-		err := os.Remove(imageMntPoint)
-		if err != nil {
-			logger.Errorf(`Failed to delete image mountpoint "%s" for RBD storage volume for image "%s" on storage pool "%s": %s`, imageMntPoint, fingerprint, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Deleted image mountpoint "%s" for RBD storage volume for image "%s" on storage pool "%s"`, imageMntPoint, fingerprint, s.pool.Name)
-	}
-
-	logger.Debugf(`Deleted RBD storage volume for image "%s" on storage pool "%s"`, fingerprint, s.pool.Name)
-	return nil
-}
-
-func (s *storageCeph) ImageMount(fingerprint string) (bool, error) {
-	logger.Debugf("Mounting RBD storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if shared.IsMountPoint(imageMntPoint) {
-		return false, nil
-	}
-
-	RBDFilesystem := s.getRBDFilesystem()
-	RBDMountOptions := s.getRBDMountOptions()
-	mountFlags, mountOptions := resolveMountOptions(RBDMountOptions)
-	RBDDevPath, ret := getRBDMappedDevPath(s.ClusterName, s.OSDPoolName,
-		storagePoolVolumeTypeNameImage, fingerprint, true, s.UserName)
-	errMsg := fmt.Sprintf("Failed to mount RBD device %s onto %s",
-		RBDDevPath, imageMntPoint)
-	if ret < 0 {
-		logger.Errorf(errMsg)
-		return false, fmt.Errorf(errMsg)
-	}
-
-	err := storageDrivers.TryMount(RBDDevPath, imageMntPoint, RBDFilesystem, mountFlags, mountOptions)
-	if err != nil || ret < 0 {
-		return false, err
-	}
-
-	logger.Debugf("Mounted RBD storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageCeph) ImageUmount(fingerprint string) (bool, error) {
-	logger.Debugf("Unmounting RBD storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-
-	imageMntPoint := driver.GetImageMountPoint(s.pool.Name, fingerprint)
-	if !shared.IsMountPoint(imageMntPoint) {
-		return false, nil
-	}
-
-	err := storageDrivers.TryUnmount(imageMntPoint, 0)
-	if err != nil {
-		return false, err
-	}
-
-	logger.Debugf("Unmounted RBD storage volume for image \"%s\" on storage pool \"%s\"", fingerprint, s.pool.Name)
-	return true, nil
-}
-
-func (s *storageCeph) StorageEntitySetQuota(volumeType int, size int64, data interface{}) error {
-	logger.Debugf(`Setting RBD quota for "%s"`, s.volume.Name)
-
-	if !shared.IntInSlice(volumeType, supportedVolumeTypes) {
-		return fmt.Errorf("Invalid storage type")
-	}
-
-	var ret int
-	var c instance.Instance
-	fsType := s.getRBDFilesystem()
-	mountpoint := ""
-	RBDDevPath := ""
-	volumeName := ""
-	switch volumeType {
-	case storagePoolVolumeTypeContainer:
-		c = data.(instance.Instance)
-		ctName := c.Name()
-		if c.IsRunning() {
-			msg := fmt.Sprintf(`Cannot resize RBD storage volume `+
-				`for container "%s" when it is running`,
-				ctName)
-			logger.Errorf(msg)
-			return fmt.Errorf(msg)
-		}
-
-		RBDDevPath, ret = getRBDMappedDevPath(s.ClusterName,
-			s.OSDPoolName, storagePoolVolumeTypeNameContainer,
-			s.volume.Name, true, s.UserName)
-		mountpoint = driver.GetContainerMountPoint(c.Project(), s.pool.Name, ctName)
-		volumeName = ctName
-	default:
-		RBDDevPath, ret = getRBDMappedDevPath(s.ClusterName,
-			s.OSDPoolName, storagePoolVolumeTypeNameCustom,
-			s.volume.Name, true, s.UserName)
-		mountpoint = driver.GetStoragePoolVolumeMountPoint(s.pool.Name,
-			s.volume.Name)
-		volumeName = s.volume.Name
-	}
-	if ret < 0 {
-		return fmt.Errorf("Failed to get mapped RBD path")
-	}
-
-	oldSize, err := units.ParseByteSizeString(s.volume.Config["size"])
-	if err != nil {
-		return err
-	}
-
-	// The right disjunct just means that someone unset the size property in
-	// the container's config. We obviously cannot resize to 0.
-	if oldSize == size || size == 0 {
-		return nil
-	}
-
-	if size < oldSize {
-		err = s.rbdShrink(RBDDevPath, size, fsType, mountpoint,
-			volumeType, volumeName, data)
-	} else if size > oldSize {
-		err = s.rbdGrow(RBDDevPath, size, fsType, mountpoint,
-			volumeType, volumeName, data)
-	}
-	if err != nil {
-		return err
-	}
-
-	// Update the database
-	s.volume.Config["size"] = units.GetByteSizeString(size, 0)
-	err = s.s.Cluster.StoragePoolVolumeUpdateByProject(
-		"default",
-		s.volume.Name,
-		volumeType,
-		s.poolID,
-		s.volume.Description,
-		s.volume.Config)
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf(`Set RBD quota for "%s"`, s.volume.Name)
-	return nil
-}
-
-func (s *storageCeph) StoragePoolResources() (*api.ResourcesStoragePool, error) {
-	var stdout bytes.Buffer
-	err := shared.RunCommandWithFds(nil, &stdout,
-		"ceph",
-		"--name", fmt.Sprintf("client.%s", s.UserName),
-		"--cluster", s.ClusterName,
-		"df",
-		"-f", "json")
-	if err != nil {
-		return nil, err
-	}
-
-	// Temporary structs for parsing
-	type cephDfPoolStats struct {
-		BytesUsed      int64 `json:"bytes_used"`
-		BytesAvailable int64 `json:"max_avail"`
-	}
-
-	type cephDfPool struct {
-		Name  string          `json:"name"`
-		Stats cephDfPoolStats `json:"stats"`
-	}
-
-	type cephDf struct {
-		Pools []cephDfPool `json:"pools"`
-	}
-
-	// Parse the JSON output
-	df := cephDf{}
-	err = json.Unmarshal(stdout.Bytes(), &df)
-	if err != nil {
-		return nil, err
-	}
-
-	var pool *cephDfPool
-	for _, entry := range df.Pools {
-		if entry.Name == s.OSDPoolName {
-			pool = &entry
-			break
-		}
-	}
-
-	if pool == nil {
-		return nil, fmt.Errorf("OSD pool missing in df output")
-	}
-
-	spaceUsed := uint64(pool.Stats.BytesUsed)
-	spaceAvailable := uint64(pool.Stats.BytesAvailable)
-
-	res := api.ResourcesStoragePool{}
-	res.Space.Total = spaceAvailable + spaceUsed
-	res.Space.Used = spaceUsed
-
-	return &res, nil
-}
-
-func (s *storageCeph) StoragePoolVolumeCopy(source *api.StorageVolumeSource) error {
-	logger.Infof("Copying RBD storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-	successMsg := fmt.Sprintf("Copied RBD storage volume \"%s\" on storage pool \"%s\" as \"%s\" to storage pool \"%s\"", source.Name, source.Pool, s.volume.Name, s.pool.Name)
-
-	if s.pool.Name != source.Pool {
-		return s.doCrossPoolVolumeCopy(source)
-	}
-
-	rbdSnapshots, err := cephRBDVolumeListSnapshots(s.ClusterName, s.OSDPoolName, source.Name, storagePoolVolumeTypeNameCustom, s.UserName)
-	if err != nil && err != db.ErrNoSuchObject {
-		return err
-	}
-
-	snapshots := []string{}
-	for _, name := range rbdSnapshots {
-		if strings.HasPrefix(name, "snapshot_") {
-			snapshots = append(snapshots, name)
-		}
-	}
-
-	if source.VolumeOnly || len(snapshots) == 0 {
-		if s.pool.Config["ceph.rbd.clone_copy"] != "" && !shared.IsTrue(s.pool.Config["ceph.rbd.clone_copy"]) {
-			err = s.copyVolumeWithoutSnapshotsFull(source)
-		} else {
-			err = s.copyVolumeWithoutSnapshotsSparse(source)
-		}
-		if err != nil {
-			logger.Errorf("Failed to create RBD storage volume \"%s\" on storage pool \"%s\": %s", source.Name, source.Pool, err)
-			return err
-		}
-	} else {
-		logger.Debugf(`Creating non-sparse copy of RBD storage volume for container "%s" to "%s" including snapshots`,
-			source.Name, s.volume.Name)
-
-		revert := true
-		volumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-		err = os.MkdirAll(volumeMntPoint, 0711)
-		if err != nil {
-			logger.Errorf("Failed to create mountpoint \"%s\" for RBD storage volume \"%s\" on storage pool \"%s\": %s", volumeMntPoint, s.volume.Name, s.pool.Name, err)
-			return err
-		}
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err = os.RemoveAll(volumeMntPoint)
-			if err != nil {
-				logger.Warnf(`Failed to delete mountpoint "%s" for RBD storage volume "%s" on storage pool "%s": %s"`, volumeMntPoint, s.volume.Name, s.pool.Name, err)
-			}
-		}()
-
-		// create empty dummy volume
-		err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName,
-			s.volume.Name, storagePoolVolumeTypeNameCustom,
-			"0", s.UserName, s.OSDDataPoolName)
-		if err != nil {
-			logger.Errorf(`Failed to create RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Created RBD storage volume "%s" on storage pool "%s"`,
-			s.volume.Name, s.pool.Name)
-
-		defer func() {
-			if !revert {
-				return
-			}
-
-			err := cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName,
-				s.volume.Name,
-				storagePoolVolumeTypeNameCustom, s.UserName)
-			if err != nil {
-				logger.Warnf(`Failed to delete RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-			}
-		}()
-
-		// receive over the dummy volume we created above
-		targetVolumeName := fmt.Sprintf(
-			"%s/custom_%s",
-			s.OSDPoolName,
-			s.volume.Name)
-
-		lastSnap := ""
-		for i, snap := range snapshots {
-			prev := ""
-			if i > 0 {
-				snapOnlyName := strings.SplitN(snapshots[i-1], "snapshot_", 2)[1]
-				prev = fmt.Sprintf("snapshot_%s", snapOnlyName)
-			}
-
-			snapOnlyName := strings.SplitN(snap, "snapshot_", 2)[1]
-			lastSnap = fmt.Sprintf("snapshot_%s", snapOnlyName)
-			sourceVolumeName := fmt.Sprintf(
-				"%s/custom_%s at snapshot_%s",
-				s.OSDPoolName,
-				source.Name,
-				snapOnlyName)
-
-			err = s.copyWithSnapshots(
-				sourceVolumeName,
-				targetVolumeName,
-				prev)
-			if err != nil {
-				logger.Errorf(`Failed to copy RBD volume storage %s to %s`, sourceVolumeName,
-					targetVolumeName)
-				return err
-			}
-			logger.Debugf(`Copied RBD volume storage %s to %s`,
-				sourceVolumeName, targetVolumeName)
-
-			defer func() {
-				if !revert {
-					return
-				}
-
-				err := cephRBDSnapshotDelete(s.ClusterName,
-					s.OSDPoolName, s.volume.Name,
-					storagePoolVolumeTypeNameCustom,
-					snapOnlyName, s.UserName)
-				if err != nil {
-					logger.Warnf(`Failed to delete RBD container storage for snapshot "%s" of container "%s"`, snapOnlyName, s.volume.Name)
-				}
-			}()
-
-			// create snapshot mountpoint
-			newTargetName := fmt.Sprintf("%s/%s", s.volume.Name, snapOnlyName)
-			targetPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, newTargetName)
-			err = os.MkdirAll(targetPath, driver.SnapshotsDirMode)
-			if err != nil {
-				logger.Errorf("Failed to create mountpoint \"%s\" for RBD storage volume \"%s\" on storage pool \"%s\": %s", targetPath, s.volume.Name, s.pool.Name, err)
-				return err
-			}
-
-			defer func() {
-				if !revert {
-					return
-				}
-
-				err = os.RemoveAll(targetPath)
-				if err != nil {
-					logger.Errorf("Failed to delete mountpoint \"%s\" for RBD storage volume \"%s\" on storage pool \"%s\": %s", targetPath, s.volume.Name, s.pool.Name, err)
-				}
-			}()
-		}
-
-		// copy snapshot
-		sourceVolumeName := fmt.Sprintf(
-			"%s/custom_%s",
-			s.OSDPoolName,
-			source.Name)
-		err = s.copyWithSnapshots(
-			sourceVolumeName,
-			targetVolumeName,
-			lastSnap)
-		if err != nil {
-			logger.Errorf(`Failed to copy RBD custom storage %s to %s`, sourceVolumeName, targetVolumeName)
-			return err
-		}
-		logger.Debugf(`Copied RBD custom storage %s to %s`, sourceVolumeName, targetVolumeName)
-
-		_, err = cephRBDVolumeMap(s.ClusterName, s.OSDPoolName,
-			s.volume.Name, storagePoolVolumeTypeNameCustom,
-			s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to map RBD storage volume for custom volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Mapped RBD storage volume for custom volume "%s" on storage pool "%s"`, s.volume.Name, s.pool.Name)
-
-		logger.Debugf(`Created non-sparse copy of RBD storage volume for custom volume "%s" to "%s" including snapshots`,
-			source.Name, s.volume.Name)
-	}
-
-	logger.Infof(successMsg)
-	return nil
-}
-
-func (s *storageCeph) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return rsyncStorageMigrationSource(args)
-}
-
-func (s *storageCeph) StorageMigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	return rsyncStorageMigrationSink(conn, op, args)
-}
-
-func (s *storageCeph) StoragePoolVolumeSnapshotCreate(target *api.StorageVolumeSnapshotsPost) error {
-	logger.Debugf("Creating RBD storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	sourcePath := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-
-	if shared.IsMountPoint(sourcePath) {
-		// This is costly but we need to ensure that all cached data has
-		// been committed to disk. If we don't then the rbd snapshot of
-		// the underlying filesystem can be inconsistent or - worst case
-		// - empty.
-		unix.Sync()
-
-		msg, fsFreezeErr := shared.TryRunCommand("fsfreeze", "--freeze", sourcePath)
-		logger.Debugf("Trying to freeze the filesystem: %s: %s", msg, fsFreezeErr)
-		if fsFreezeErr == nil {
-			defer shared.TryRunCommand("fsfreeze", "--unfreeze", sourcePath)
-		}
-	}
-
-	sourceOnlyName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(target.Name)
-	snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
-	err := cephRBDSnapshotCreate(s.ClusterName, s.OSDPoolName, sourceOnlyName, storagePoolVolumeTypeNameCustom, snapshotName, s.UserName)
-	if err != nil {
-		logger.Errorf("Failed to create snapshot for RBD storage volume for image \"%s\" on storage pool \"%s\": %s", sourceOnlyName, s.pool.Name, err)
-		return err
-	}
-
-	targetPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, target.Name)
-	err = os.MkdirAll(targetPath, driver.SnapshotsDirMode)
-	if err != nil {
-		logger.Errorf("Failed to create mountpoint \"%s\" for RBD storage volume \"%s\" on storage pool \"%s\": %s", targetPath, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	logger.Debugf("Created RBD storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageCeph) doPoolVolumeSnapshotDelete(name string) error {
-	sourceName, snapshotOnlyName, ok := shared.InstanceGetParentAndSnapshotName(name)
-	if !ok {
-		return fmt.Errorf("Not a snapshot name")
-	}
-	snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
-
-	rbdVolumeExists := cephRBDSnapshotExists(s.ClusterName, s.OSDPoolName, sourceName, storagePoolVolumeTypeNameCustom, snapshotName, s.UserName)
-	if rbdVolumeExists {
-		ret := cephContainerSnapshotDelete(s.ClusterName, s.OSDPoolName, sourceName, storagePoolVolumeTypeNameCustom, snapshotName, s.UserName)
-		if ret < 0 {
-			msg := fmt.Sprintf("Failed to delete RBD storage volume for snapshot \"%s\" on storage pool \"%s\"", name, s.pool.Name)
-			logger.Errorf(msg)
-			return fmt.Errorf(msg)
-		}
-	}
-
-	storageVolumeSnapshotPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, name)
-	empty, err := shared.PathIsEmpty(storageVolumeSnapshotPath)
-	if err == nil && empty {
-		os.RemoveAll(storageVolumeSnapshotPath)
-	}
-
-	err = s.s.Cluster.StoragePoolVolumeDelete(
-		"default",
-		name,
-		storagePoolVolumeTypeCustom,
-		s.poolID)
-	if err != nil {
-		logger.Errorf(`Failed to delete database entry for DIR storage volume "%s" on storage pool "%s"`,
-			name, s.pool.Name)
-	}
-
-	return nil
-}
-
-func (s *storageCeph) StoragePoolVolumeSnapshotDelete() error {
-	logger.Infof("Deleting CEPH storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	err := s.doPoolVolumeSnapshotDelete(s.volume.Name)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Deleted CEPH storage volume snapshot \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-	return nil
-}
-
-func (s *storageCeph) StoragePoolVolumeSnapshotRename(newName string) error {
-	logger.Infof("Renaming CEPH storage volume on OSD storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, newName)
-
-	sourceName, oldSnapOnlyName, ok := shared.InstanceGetParentAndSnapshotName(s.volume.Name)
-	if !ok {
-		return fmt.Errorf("Not a snapshot name")
-	}
-
-	err := cephRBDVolumeSnapshotRename(s.ClusterName, s.OSDPoolName, sourceName, storagePoolVolumeTypeNameCustom, fmt.Sprintf("snapshot_%s", oldSnapOnlyName), fmt.Sprintf("snapshot_%s", newName), s.UserName)
-	if err != nil {
-		logger.Errorf("Failed to rename RBD storage volume for container \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf("Renamed RBD storage volume for container \"%s\" on storage pool \"%s\"", s.volume.Name, s.pool.Name)
-
-	fullSnapshotName := fmt.Sprintf("%s%s%s", sourceName, shared.SnapshotDelimiter, newName)
-	oldPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, s.volume.Name)
-	newPath := driver.GetStoragePoolVolumeSnapshotMountPoint(s.pool.Name, fullSnapshotName)
-	err = os.Rename(oldPath, newPath)
-	if err != nil {
-		return err
-	}
-
-	logger.Infof("Renamed CEPH storage volume on OSD storage pool \"%s\" from \"%s\" to \"%s\"", s.pool.Name, s.volume.Name, fullSnapshotName)
-
-	return s.s.Cluster.StoragePoolVolumeRename("default", s.volume.Name, fullSnapshotName, storagePoolVolumeTypeCustom, s.poolID)
-}
-
-func (s *storageCeph) MigrationType() migration.MigrationFSType {
-	return migration.MigrationFSType_RBD
-}
-
-func (s *storageCeph) PreservesInodes() bool {
-	return false
-}
-
-func (s *storageCeph) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	// If the container is a snapshot, let's just send that. We don't need
-	// to send anything else, because that's all the user asked for.
-	if args.Instance.IsSnapshot() {
-		return &rbdMigrationSourceDriver{
-			container: args.Instance,
-			ceph:      s,
-		}, nil
-	}
-
-	driver := rbdMigrationSourceDriver{
-		container:        args.Instance,
-		snapshots:        []instance.Instance{},
-		rbdSnapshotNames: []string{},
-		ceph:             s,
-	}
-
-	instanceName := args.Instance.Name()
-	if args.InstanceOnly {
-		logger.Debugf(`Only migrating the RBD storage volume for container "%s" on storage pool "%s`, instanceName, s.pool.Name)
-		return &driver, nil
-	}
-
-	// List all the snapshots in order of reverse creation. The idea here is
-	// that we send the oldest to newest snapshot, hopefully saving on xfer
-	// costs. Then, after all that, we send the container itself.
-	snapshots, err := cephRBDVolumeListSnapshots(s.ClusterName,
-		s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName),
-		storagePoolVolumeTypeNameContainer, s.UserName)
-	if err != nil {
-		if err != db.ErrNoSuchObject {
-			logger.Errorf(`Failed to list snapshots for RBD storage volume "%s" on storage pool "%s": %s`, instanceName, s.pool.Name, err)
-			return nil, err
-		}
-	}
-	logger.Debugf(`Retrieved snapshots "%v" for RBD storage volume "%s" on storage pool "%s"`, snapshots, instanceName, s.pool.Name)
-
-	for _, snap := range snapshots {
-		// In the case of e.g. multiple copies running at the same time,
-		// we will have potentially multiple migration-send snapshots.
-		// (Or in the case of the test suite, sometimes one will take
-		// too long to delete.)
-		if !strings.HasPrefix(snap, "snapshot_") {
-			continue
-		}
-
-		lxdName := fmt.Sprintf("%s%s%s", instanceName, shared.SnapshotDelimiter, snap[len("snapshot_"):])
-		snapshot, err := instance.LoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
-		if err != nil {
-			logger.Errorf(`Failed to load snapshot "%s" for RBD storage volume "%s" on storage pool "%s": %s`, lxdName, instanceName, s.pool.Name, err)
-			return nil, err
-		}
-
-		driver.snapshots = append(driver.snapshots, snapshot)
-		driver.rbdSnapshotNames = append(driver.rbdSnapshotNames, snap)
-	}
-
-	return &driver, nil
-}
-
-func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	// Check that we received a valid root disk device with a pool property
-	// set.
-	parentStoragePool := ""
-	parentExpandedDevices := args.Instance.ExpandedDevices()
-	parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative())
-	if parentLocalRootDiskDeviceKey != "" {
-		parentStoragePool = parentLocalRootDiskDevice["pool"]
-	}
-
-	// A little neuroticism.
-	if parentStoragePool == "" {
-		return fmt.Errorf(`Detected that the container's root device ` +
-			`is missing the pool property during RBD migration`)
-	}
-	logger.Debugf(`Detected root disk device with pool property set to "%s" during RBD migration`, parentStoragePool)
-
-	// create empty volume for container
-	// TODO: The cluster name can be different between LXD instances. Find
-	// out what to do in this case. Maybe I'm overthinking this and if the
-	// pool exists and we were able to initialize a new storage interface on
-	// the receiving LXD instance it also means that s.ClusterName has been
-	// set to the correct cluster name for that LXD instance. Yeah, I think
-	// that's actually correct.
-	instanceName := args.Instance.Name()
-	if !cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, s.UserName) {
-		err := cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, "0", s.UserName, s.OSDDataPoolName)
-		if err != nil {
-			logger.Errorf(`Failed to create RBD storage volume "%s" for cluster "%s" in OSD pool "%s" on storage pool "%s": %s`, instanceName, s.ClusterName, s.OSDPoolName, s.pool.Name, err)
-			return err
-		}
-		logger.Debugf(`Created RBD storage volume "%s" on storage pool "%s"`, instanceName, s.pool.Name)
-	}
-
-	if len(args.Snapshots) > 0 {
-		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(args.Instance.Project(), instanceName))
-		snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(args.Instance.Project(), instanceName))
-		if !shared.PathExists(snapshotMntPointSymlink) {
-			err := os.Symlink(snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// Now we're ready to receive the actual fs.
-	recvName := fmt.Sprintf("%s/container_%s", s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName))
-	for _, snap := range args.Snapshots {
-		curSnapName := snap.GetName()
-		ctArgs := snapshotProtobufToInstanceArgs(args.Instance.Project(), instanceName, snap)
-
-		// Ensure that snapshot and parent container have the same
-		// storage pool in their local root disk device.  If the root
-		// disk device for the snapshot comes from a profile on the new
-		// instance as well we don't need to do anything.
-		if ctArgs.Devices != nil {
-			snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(ctArgs.Devices.CloneNative())
-			if snapLocalRootDiskDeviceKey != "" {
-				ctArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
-			}
-		}
-		_, err := containerCreateEmptySnapshot(args.Instance.DaemonState(), ctArgs)
-		if err != nil {
-			logger.Errorf(`Failed to create empty RBD storage volume for container "%s" on storage pool "%s: %s`, instanceName, s.OSDPoolName, err)
-			return err
-		}
-		logger.Debugf(`Created empty RBD storage volume for container "%s" on storage pool "%s`, instanceName, s.OSDPoolName)
-
-		wrapper := migration.ProgressWriter(op, "fs_progress", curSnapName)
-		err = s.rbdRecv(conn, recvName, wrapper)
-		if err != nil {
-			logger.Errorf(`Failed to receive RBD storage volume "%s": %s`, curSnapName, err)
-			return err
-		}
-		logger.Debugf(`Received RBD storage volume "%s"`, curSnapName)
-
-		snapshotMntPoint := driver.GetSnapshotMountPoint(args.Instance.Project(), s.pool.Name, fmt.Sprintf("%s/%s", instanceName, *snap.Name))
-		if !shared.PathExists(snapshotMntPoint) {
-			err := os.MkdirAll(snapshotMntPoint, 0100)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	defer func() {
-		snaps, err := cephRBDVolumeListSnapshots(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, s.UserName)
-		if err == nil {
-			for _, snap := range snaps {
-				snapOnlyName, _, _ := shared.InstanceGetParentAndSnapshotName(snap)
-				if !strings.HasPrefix(snapOnlyName, "migration-send") {
-					continue
-				}
-
-				err := cephRBDSnapshotDelete(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, snapOnlyName, s.UserName)
-				if err != nil {
-					logger.Warnf(`Failed to delete RBD container storage for snapshot "%s" of container "%s"`, snapOnlyName, instanceName)
-				}
-			}
-		}
-	}()
-
-	// receive the container itself
-	wrapper := migration.ProgressWriter(op, "fs_progress", instanceName)
-	err := s.rbdRecv(conn, recvName, wrapper)
-	if err != nil {
-		logger.Errorf(`Failed to receive RBD storage volume "%s": %s`, recvName, err)
-		return err
-	}
-	logger.Debugf(`Received RBD storage volume "%s"`, recvName)
-
-	if args.Live {
-		err := s.rbdRecv(conn, recvName, wrapper)
-		if err != nil {
-			logger.Errorf(`Failed to receive RBD storage volume "%s": %s`, recvName, err)
-			return err
-		}
-		logger.Debugf(`Received RBD storage volume "%s"`, recvName)
-	}
-
-	// Re-generate the UUID
-	err = s.cephRBDGenerateUUID(project.Prefix(args.Instance.Project(), args.Instance.Name()), storagePoolVolumeTypeNameContainer)
-	if err != nil {
-		return err
-	}
-
-	containerMntPoint := driver.GetContainerMountPoint(args.Instance.Project(), s.pool.Name, instanceName)
-	err = driver.CreateContainerMountpoint(
-		containerMntPoint,
-		args.Instance.Path(),
-		args.Instance.IsPrivileged())
-	if err != nil {
-		logger.Errorf(`Failed to create mountpoint "%s" for RBD storage volume for container "%s" on storage pool "%s": %s"`, containerMntPoint, instanceName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Created mountpoint "%s" for RBD storage volume for container "%s" on storage pool "%s""`, containerMntPoint, instanceName, s.pool.Name)
-
-	return nil
-}
-func (s *storageCeph) rbdRecv(conn *websocket.Conn,
-	volumeName string,
-	writeWrapper func(io.WriteCloser) io.WriteCloser) error {
-	args := []string{
-		"import-diff",
-		"--cluster", s.ClusterName,
-		"-",
-		volumeName,
-	}
-
-	cmd := exec.Command("rbd", args...)
-
-	stdin, err := cmd.StdinPipe()
-	if err != nil {
-		return err
-	}
-
-	stderr, err := cmd.StderrPipe()
-	if err != nil {
-		return err
-	}
-
-	err = cmd.Start()
-	if err != nil {
-		return err
-	}
-
-	writePipe := io.WriteCloser(stdin)
-	if writeWrapper != nil {
-		writePipe = writeWrapper(stdin)
-	}
-
-	<-shared.WebsocketRecvStream(writePipe, conn)
-
-	output, err := ioutil.ReadAll(stderr)
-	if err != nil {
-		logger.Debugf(`Failed to read stderr output from "rbd import-diff": %s`, err)
-	}
-
-	err = cmd.Wait()
-	if err != nil {
-		logger.Errorf(`Failed to perform "rbd import-diff": %s`, string(output))
-	}
-
-	return err
-}
diff --git a/lxd/storage_ceph_utils.go b/lxd/storage_ceph_utils.go
deleted file mode 100644
index b55f1e296e..0000000000
--- a/lxd/storage_ceph_utils.go
+++ /dev/null
@@ -1,2091 +0,0 @@
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/pborman/uuid"
-	"golang.org/x/sys/unix"
-
-	"github.com/lxc/lxd/lxd/backup"
-	"github.com/lxc/lxd/lxd/db"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/lxd/rsync"
-	driver "github.com/lxc/lxd/lxd/storage"
-	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/logger"
-	"github.com/lxc/lxd/shared/units"
-)
-
-// cephOSDPoolExists checks whether a given OSD pool exists.
-func cephOSDPoolExists(ClusterName string, poolName string, userName string) bool {
-	_, err := shared.RunCommand(
-		"ceph",
-		"--name", fmt.Sprintf("client.%s", userName),
-		"--cluster", ClusterName,
-		"osd",
-		"pool",
-		"get",
-		poolName,
-		"size")
-	if err != nil {
-		return false
-	}
-
-	return true
-}
-
-// cephOSDPoolDestroy destroys an OSD pool.
-// - A call to cephOSDPoolDestroy will destroy a pool including any storage
-//   volumes that still exist in the pool.
-// - In case the OSD pool that is supposed to be deleted does not exist this
-//   command will still exit 0. This means that if the caller wants to be sure
-//   that this call actually deleted an OSD pool it needs to check for the
-//   existence of the pool first.
-func cephOSDPoolDestroy(clusterName string, poolName string, userName string) error {
-	_, err := shared.RunCommand("ceph",
-		"--name", fmt.Sprintf("client.%s", userName),
-		"--cluster", clusterName,
-		"osd",
-		"pool",
-		"delete",
-		poolName,
-		poolName,
-		"--yes-i-really-really-mean-it")
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDVolumeCreate creates an RBD storage volume.
-// Note that the set of features is intentionally limited by passing
-// --image-feature explicitly. This is done to ensure that
-// the chances of a conflict between the features supported by the userspace
-// library and the kernel module are minimized. Otherwise random panics might
-// occur.
-func cephRBDVolumeCreate(clusterName string, poolName string, volumeName string,
-	volumeType string, size string, userName string, dataPoolName string) error {
-	cmd := []string{
-		"--id", userName,
-		"--image-feature", "layering,",
-		"--cluster", clusterName,
-		"--pool", poolName,
-	}
-
-	if dataPoolName != "" {
-		cmd = append(cmd, "--data-pool", dataPoolName)
-	}
-
-	cmd = append(cmd,
-		"--size", size,
-		"create",
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-
-	_, err := shared.RunCommand("rbd", cmd...)
-	return err
-}
-
-// cephRBDVolumeExists checks whether a given RBD storage volume exists.
-func cephRBDVolumeExists(clusterName string, poolName string, volumeName string,
-	volumeType string, userName string) bool {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"image-meta",
-		"list",
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		return false
-	}
-	return true
-}
-
-// cephRBDSnapshotExists checks whether a given RBD snapshot exists.
-func cephRBDSnapshotExists(clusterName string, poolName string,
-	volumeName string, volumeType string, snapshotName string,
-	userName string) bool {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"info",
-		fmt.Sprintf("%s_%s@%s", volumeType, volumeName, snapshotName))
-	if err != nil {
-		return false
-	}
-	return true
-}
-
-// cephRBDVolumeDelete deletes an RBD storage volume.
-// - In case the RBD storage volume that is supposed to be deleted does not
-//   exist this command will still exit 0. This means that if the caller wants
-//   to be sure that this call actually deleted an RBD storage volume it needs
-//   to check for the existence of the pool first.
-func cephRBDVolumeDelete(clusterName string, poolName string, volumeName string,
-	volumeType string, userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"rm",
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDVolumeMap maps a given RBD storage volume
-// This will ensure that the RBD storage volume is accessible as a block device
-// in the /dev directory and is therefore necessary in order to mount it.
-func cephRBDVolumeMap(clusterName string, poolName string, volumeName string,
-	volumeType string, userName string) (string, error) {
-	devPath, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"map",
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		return "", err
-	}
-
-	idx := strings.Index(devPath, "/dev/rbd")
-	if idx < 0 {
-		return "", fmt.Errorf("Failed to detect mapped device path")
-	}
-
-	devPath = devPath[idx:]
-	return strings.TrimSpace(devPath), nil
-}
-
-// cephRBDVolumeUnmap unmaps a given RBD storage volume
-// This is a precondition in order to be able to delete an RBD storage volume.
-func cephRBDVolumeUnmap(clusterName string, poolName string, volumeName string,
-	volumeType string, userName string, unmapUntilEINVAL bool) error {
-	unmapImageName := fmt.Sprintf("%s_%s", volumeType, volumeName)
-
-	busyCount := 0
-
-again:
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"unmap",
-		unmapImageName)
-	if err != nil {
-		runError, ok := err.(shared.RunError)
-		if ok {
-			exitError, ok := runError.Err.(*exec.ExitError)
-			if ok {
-				waitStatus := exitError.Sys().(syscall.WaitStatus)
-				if waitStatus.ExitStatus() == 22 {
-					// EINVAL (already unmapped)
-					return nil
-				}
-
-				if waitStatus.ExitStatus() == 16 {
-					// EBUSY (currently in use)
-					busyCount++
-					if busyCount == 10 {
-						return err
-					}
-
-					// Wait a second and try again
-					time.Sleep(time.Second)
-					goto again
-				}
-			}
-		}
-
-		return err
-	}
-
-	if unmapUntilEINVAL {
-		goto again
-	}
-
-	return nil
-}
-
-// cephRBDVolumeSnapshotUnmap unmaps a given RBD snapshot
-// This is a precondition in order to be able to delete an RBD snapshot.
-func cephRBDVolumeSnapshotUnmap(clusterName string, poolName string,
-	volumeName string, volumeType string, snapshotName string,
-	userName string, unmapUntilEINVAL bool) error {
-	unmapSnapshotName := fmt.Sprintf("%s_%s@%s", volumeType, volumeName,
-		snapshotName)
-
-again:
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"unmap",
-		unmapSnapshotName)
-	if err != nil {
-		runError, ok := err.(shared.RunError)
-		if ok {
-			exitError, ok := runError.Err.(*exec.ExitError)
-			if ok {
-				waitStatus := exitError.Sys().(syscall.WaitStatus)
-				if waitStatus.ExitStatus() == 22 {
-					// EINVAL (already unmapped)
-					return nil
-				}
-			}
-		}
-		return err
-	}
-
-	if unmapUntilEINVAL {
-		goto again
-	}
-
-	return nil
-}
-
-// cephRBDSnapshotCreate creates a read-write snapshot of a given RBD storage
-// volume
-func cephRBDSnapshotCreate(clusterName string, poolName string,
-	volumeName string, volumeType string, snapshotName string,
-	userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"snap",
-		"create",
-		"--snap", snapshotName,
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDSnapshotsPurge deletes all snapshots of a given RBD storage volume
-// Note that this will only succeed if none of the snapshots are protected.
-func cephRBDSnapshotsPurge(clusterName string, poolName string,
-	volumeName string, volumeType string, userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"snap",
-		"purge",
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDSnapshotProtect protects a given snapshot from being deleted
-// This is a precondition to be able to create RBD clones from a given snapshot.
-func cephRBDSnapshotProtect(clusterName string, poolName string,
-	volumeName string, volumeType string, snapshotName string,
-	userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"snap",
-		"protect",
-		"--snap", snapshotName,
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		runError, ok := err.(shared.RunError)
-		if ok {
-			exitError, ok := runError.Err.(*exec.ExitError)
-			if ok {
-				waitStatus := exitError.Sys().(syscall.WaitStatus)
-				if waitStatus.ExitStatus() == 16 {
-					// EBUSY (snapshot already protected)
-					return nil
-				}
-			}
-		}
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDSnapshotUnprotect unprotects a given snapshot
-// - This is a precondition to be able to delete an RBD snapshot.
-// - This command will only succeed if the snapshot does not have any clones.
-func cephRBDSnapshotUnprotect(clusterName string, poolName string,
-	volumeName string, volumeType string, snapshotName string,
-	userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"snap",
-		"unprotect",
-		"--snap", snapshotName,
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		runError, ok := err.(shared.RunError)
-		if ok {
-			exitError, ok := runError.Err.(*exec.ExitError)
-			if ok {
-				waitStatus := exitError.Sys().(syscall.WaitStatus)
-				if waitStatus.ExitStatus() == 22 {
-					// EINVAL (snapshot already unprotected)
-					return nil
-				}
-			}
-		}
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDCloneCreate creates a clone from a protected RBD snapshot
-func cephRBDCloneCreate(sourceClusterName string, sourcePoolName string,
-	sourceVolumeName string, sourceVolumeType string,
-	sourceSnapshotName string, targetPoolName string,
-	targetVolumeName string, targetVolumeType string,
-	userName string, targetDataPoolName string) error {
-	cmd := []string{
-		"--id", userName,
-		"--cluster", sourceClusterName,
-		"--image-feature", "layering",
-	}
-
-	if targetDataPoolName != "" {
-		cmd = append(cmd, "--data-pool", targetDataPoolName)
-	}
-
-	cmd = append(cmd,
-		"clone",
-		fmt.Sprintf("%s/%s_%s@%s", sourcePoolName, sourceVolumeType,
-			sourceVolumeName, sourceSnapshotName),
-		fmt.Sprintf("%s/%s_%s", targetPoolName, targetVolumeType,
-			targetVolumeName))
-
-	_, err := shared.RunCommand("rbd", cmd...)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDSnapshotListClones list all clones of an RBD snapshot
-func cephRBDSnapshotListClones(clusterName string, poolName string,
-	volumeName string, volumeType string,
-	snapshotName string, userName string) ([]string, error) {
-	msg, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"children",
-		"--image", fmt.Sprintf("%s_%s", volumeType, volumeName),
-		"--snap", snapshotName)
-	if err != nil {
-		return nil, err
-	}
-
-	msg = strings.TrimSpace(msg)
-	clones := strings.Fields(msg)
-	if len(clones) == 0 {
-		return nil, db.ErrNoSuchObject
-	}
-
-	return clones, nil
-}
-
-// cephRBDVolumeMarkDeleted marks an RBD storage volume as being in "zombie"
-// state
-// An RBD storage volume that is in zombie state is not tracked in LXD's
-// database anymore but still needs to be kept around for the sake of any
-// dependent storage entities in the storage pool. This usually happens when an
-// RBD storage volume has protected snapshots; a scenario most common when
-// creating a sparse copy of a container or when LXD updated an image and the
-// image still has dependent container clones.
-func cephRBDVolumeMarkDeleted(clusterName string, poolName string,
-	volumeType string, oldVolumeName string, newVolumeName string,
-	userName string, suffix string) error {
-	deletedName := fmt.Sprintf("%s/zombie_%s_%s", poolName, volumeType,
-		newVolumeName)
-	if suffix != "" {
-		deletedName = fmt.Sprintf("%s_%s", deletedName, suffix)
-	}
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"mv",
-		fmt.Sprintf("%s/%s_%s", poolName, volumeType, oldVolumeName),
-		deletedName)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDVolumeUnmarkDeleted unmarks an RBD storage volume as being in "zombie"
-// state
-// - An RBD storage volume that is in zombie state is not tracked in LXD's
-//   anymore but still needs to be kept around for the sake of any dependent
-//   storage entities in the storage pool.
-// - This function is mostly used when a user has deleted the storage volume of
-//   an image from the storage pool and then triggers a container creation. If
-//   LXD detects that the storage volume for the given hash already exists in
-//   the pool but is marked as "zombie" it will unmark it as a zombie instead of
-//   creating another storage volume for the image.
-func cephRBDVolumeUnmarkDeleted(clusterName string, poolName string,
-	volumeName string, volumeType string, userName string, oldSuffix string,
-	newSuffix string) error {
-	oldName := fmt.Sprintf("%s/zombie_%s_%s", poolName, volumeType, volumeName)
-	if oldSuffix != "" {
-		oldName = fmt.Sprintf("%s_%s", oldName, oldSuffix)
-	}
-
-	newName := fmt.Sprintf("%s/%s_%s", poolName, volumeType, volumeName)
-	if newSuffix != "" {
-		newName = fmt.Sprintf("%s_%s", newName, newSuffix)
-	}
-
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"mv",
-		oldName,
-		newName)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDVolumeRename renames a given RBD storage volume
-// Note that this usually requires that the image be unmapped under its original
-// name, then renamed, and finally will be remapped again. If it is not unmapped
-// under its original name and the callers maps it under its new name the image
-// will be mapped twice. This will prevent it from being deleted.
-func cephRBDVolumeRename(clusterName string, poolName string, volumeType string,
-	oldVolumeName string, newVolumeName string, userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"mv",
-		fmt.Sprintf("%s/%s_%s", poolName, volumeType, oldVolumeName),
-		fmt.Sprintf("%s/%s_%s", poolName, volumeType, newVolumeName))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDVolumeSnapshotRename renames a given RBD snapshot
-// Note that if the snapshot is mapped - which it usually shouldn't be - this
-// usually requires that the snapshot be unmapped under its original name, then
-// renamed, and finally will be remapped again. If it is not unmapped under its
-// original name and the caller maps it under its new name the snapshot will be
-// mapped twice. This will prevent it from being deleted.
-func cephRBDVolumeSnapshotRename(clusterName string, poolName string,
-	volumeName string, volumeType string, oldSnapshotName string,
-	newSnapshotName string, userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"snap",
-		"rename",
-		fmt.Sprintf("%s/%s_%s@%s", poolName, volumeType, volumeName,
-			oldSnapshotName),
-		fmt.Sprintf("%s/%s_%s@%s", poolName, volumeType, volumeName,
-			newSnapshotName))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDVolumeGetParent will return the snapshot the RBD clone was created
-// from
-// - If the RBD storage volume is not a clone then this function will return
-//   db.ErrNoSuchObject.
-// - The snapshot will be returned as
-//   <osd-pool-name>/<rbd-volume-name>@<rbd-snapshot-name>
-//   The caller will usually want to parse this according to its needs. This
-//   helper library provides two small functions to do this but see below.
-func cephRBDVolumeGetParent(clusterName string, poolName string,
-	volumeName string, volumeType string, userName string) (string, error) {
-	msg, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"info",
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		return "", err
-	}
-
-	idx := strings.Index(msg, "parent: ")
-	if idx == -1 {
-		return "", db.ErrNoSuchObject
-	}
-
-	msg = msg[(idx + len("parent: ")):]
-	msg = strings.TrimSpace(msg)
-
-	idx = strings.Index(msg, "\n")
-	if idx == -1 {
-		return "", fmt.Errorf("Unexpected parsing error")
-	}
-
-	msg = msg[:idx]
-	msg = strings.TrimSpace(msg)
-
-	return msg, nil
-}
-
-// cephRBDSnapshotDelete deletes an RBD snapshot
-// This requires that the snapshot does not have any clones and is unmapped and
-// unprotected.
-func cephRBDSnapshotDelete(clusterName string, poolName string,
-	volumeName string, volumeType string, snapshotName string,
-	userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"snap",
-		"rm",
-		fmt.Sprintf("%s_%s@%s", volumeType, volumeName, snapshotName))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDVolumeCopy copies an RBD storage volume
-// This is a non-sparse copy which doesn't introduce any dependency relationship
-// between the source RBD storage volume and the target RBD storage volume. The
-// operations is similar to creating an empty RBD storage volume and rsyncing
-// the contents of the source RBD storage volume into it.
-func cephRBDVolumeCopy(clusterName string, oldVolumeName string,
-	newVolumeName string, userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"cp",
-		oldVolumeName,
-		newVolumeName)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDVolumeListSnapshots retrieves the snapshots of an RBD storage volume
-// The format of the snapshot names is simply the part after the @. So given a
-// valid RBD path relative to a pool
-// <osd-pool-name>/<rbd-storage-volume>@<rbd-snapshot-name>
-// this will only return
-// <rbd-snapshot-name>
-func cephRBDVolumeListSnapshots(clusterName string, poolName string,
-	volumeName string, volumeType string,
-	userName string) ([]string, error) {
-	msg, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--format", "json",
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"snap",
-		"ls", fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		return []string{}, err
-	}
-
-	var data []map[string]interface{}
-	err = json.Unmarshal([]byte(msg), &data)
-	if err != nil {
-		return []string{}, err
-	}
-
-	snapshots := []string{}
-	for _, v := range data {
-		_, ok := v["name"]
-		if !ok {
-			return []string{}, fmt.Errorf("No \"name\" property found")
-		}
-
-		name, ok := v["name"].(string)
-		if !ok {
-			return []string{}, fmt.Errorf("\"name\" property did not have string type")
-		}
-
-		name = strings.TrimSpace(name)
-		snapshots = append(snapshots, name)
-	}
-
-	if len(snapshots) == 0 {
-		return []string{}, db.ErrNoSuchObject
-	}
-
-	return snapshots, nil
-}
-
-// cephRBDVolumeRestore restores an RBD storage volume to the state of one of
-// its snapshots
-func cephRBDVolumeRestore(clusterName string, poolName string, volumeName string,
-	volumeType string, snapshotName string, userName string) error {
-	_, err := shared.RunCommand(
-		"rbd",
-		"--id", userName,
-		"--cluster", clusterName,
-		"--pool", poolName,
-		"snap",
-		"rollback",
-		"--snap", snapshotName,
-		fmt.Sprintf("%s_%s", volumeType, volumeName))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// getRBDSize returns the size the RBD storage volume is supposed to be created
-// with
-func (s *storageCeph) getRBDSize() (string, error) {
-	size, ok := s.volume.Config["size"]
-	if !ok {
-		size = s.pool.Config["volume.size"]
-	}
-
-	sz, err := units.ParseByteSizeString(size)
-	if err != nil {
-		return "", err
-	}
-
-	// Safety net: Set to default value.
-	if sz == 0 {
-		sz, _ = units.ParseByteSizeString("10GB")
-	}
-
-	return fmt.Sprintf("%dB", sz), nil
-}
-
-// getRBDFilesystem returns the filesystem the RBD storage volume is supposed to
-// be created with
-func (s *storageCeph) getRBDFilesystem() string {
-	if s.volume.Config["block.filesystem"] != "" {
-		return s.volume.Config["block.filesystem"]
-	}
-
-	if s.pool.Config["volume.block.filesystem"] != "" {
-		return s.pool.Config["volume.block.filesystem"]
-	}
-
-	return "ext4"
-}
-
-// getRBDMountOptions returns the mount options the storage volume is supposed
-// to be mounted with
-// The option string that is returned needs to be passed to the approriate
-// helper (currently named "LXDResolveMountoptions") which will take on the job
-// of splitting it into appropriate flags and string options.
-func (s *storageCeph) getRBDMountOptions() string {
-	if s.volume.Config["block.mount_options"] != "" {
-		return s.volume.Config["block.mount_options"]
-	}
-
-	if s.pool.Config["volume.block.mount_options"] != "" {
-		return s.pool.Config["volume.block.mount_options"]
-	}
-
-	if s.getRBDFilesystem() == "btrfs" {
-		return "user_subvol_rm_allowed,discard"
-	}
-
-	return "discard"
-}
-
-// copyWithoutSnapshotsFull creates a non-sparse copy of a container
-// This does not introduce a dependency relation between the source RBD storage
-// volume and the target RBD storage volume.
-func (s *storageCeph) copyWithoutSnapshotsFull(target instance.Instance, source instance.Instance) error {
-	logger.Debugf(`Creating non-sparse copy of RBD storage volume for container "%s" to "%s" without snapshots`, source.Name(), target.Name())
-
-	sourceIsSnapshot := source.IsSnapshot()
-	sourceContainerName := project.Prefix(source.Project(), source.Name())
-	targetContainerName := project.Prefix(target.Project(), target.Name())
-	oldVolumeName := fmt.Sprintf("%s/container_%s", s.OSDPoolName,
-		sourceContainerName)
-	newVolumeName := fmt.Sprintf("%s/container_%s", s.OSDPoolName,
-		targetContainerName)
-	if sourceIsSnapshot {
-		sourceContainerOnlyName, sourceSnapshotOnlyName, _ :=
-			shared.InstanceGetParentAndSnapshotName(sourceContainerName)
-		oldVolumeName = fmt.Sprintf("%s/container_%s at snapshot_%s",
-			s.OSDPoolName, sourceContainerOnlyName,
-			sourceSnapshotOnlyName)
-	}
-
-	err := cephRBDVolumeCopy(s.ClusterName, oldVolumeName, newVolumeName,
-		s.UserName)
-	if err != nil {
-		logger.Debugf(`Failed to create full RBD copy "%s" to "%s": %s`, source.Name(), target.Name(), err)
-		return err
-	}
-
-	_, err = cephRBDVolumeMap(s.ClusterName, s.OSDPoolName, targetContainerName,
-		storagePoolVolumeTypeNameContainer, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to map RBD storage volume for image "%s" on storage pool "%s": %s`, targetContainerName, s.pool.Name, err)
-		return err
-	}
-
-	// Re-generate the UUID
-	err = s.cephRBDGenerateUUID(project.Prefix(target.Project(), target.Name()), storagePoolVolumeTypeNameContainer)
-	if err != nil {
-		return err
-	}
-
-	// Create mountpoint
-	targetContainerMountPoint := driver.GetContainerMountPoint(target.Project(), s.pool.Name, target.Name())
-	err = driver.CreateContainerMountpoint(targetContainerMountPoint, target.Path(), target.IsPrivileged())
-	if err != nil {
-		return err
-	}
-
-	ourMount, err := target.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer target.StorageStop()
-	}
-
-	err = target.DeferTemplateApply("copy")
-	if err != nil {
-		logger.Errorf(`Failed to apply copy template for container "%s": %s`, target.Name(), err)
-		return err
-	}
-	logger.Debugf(`Applied copy template for container "%s"`, target.Name())
-
-	logger.Debugf(`Created non-sparse copy of RBD storage volume for container "%s" to "%s" without snapshots`, source.Name(),
-		target.Name())
-	return nil
-}
-
-// copyWithoutSnapshotsFull creates a sparse copy of a container
-// This introduces a dependency relation between the source RBD storage volume
-// and the target RBD storage volume.
-func (s *storageCeph) copyWithoutSnapshotsSparse(target instance.Instance, source instance.Instance) error {
-	logger.Debugf(`Creating sparse copy of RBD storage volume for container "%s" to "%s" without snapshots`, source.Name(),
-		target.Name())
-
-	sourceIsSnapshot := source.IsSnapshot()
-	sourceContainerName := project.Prefix(source.Project(), source.Name())
-	targetContainerName := project.Prefix(target.Project(), target.Name())
-	sourceContainerOnlyName := sourceContainerName
-	sourceSnapshotOnlyName := ""
-	snapshotName := fmt.Sprintf("zombie_snapshot_%s",
-		uuid.NewRandom().String())
-	if sourceIsSnapshot {
-		sourceContainerOnlyName, sourceSnapshotOnlyName, _ =
-			shared.InstanceGetParentAndSnapshotName(sourceContainerName)
-		snapshotName = fmt.Sprintf("snapshot_%s", sourceSnapshotOnlyName)
-	} else {
-		// create snapshot
-		err := cephRBDSnapshotCreate(s.ClusterName, s.OSDPoolName,
-			sourceContainerName, storagePoolVolumeTypeNameContainer,
-			snapshotName, s.UserName)
-		if err != nil {
-			logger.Errorf(`Failed to create snapshot for RBD storage volume for image "%s" on storage pool "%s": %s`, targetContainerName, s.pool.Name, err)
-			return err
-		}
-	}
-
-	// protect volume so we can create clones of it
-	err := cephRBDSnapshotProtect(s.ClusterName, s.OSDPoolName,
-		sourceContainerOnlyName, storagePoolVolumeTypeNameContainer,
-		snapshotName, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to protect snapshot for RBD storage volume for image "%s" on storage pool "%s": %s`, snapshotName, s.pool.Name, err)
-		return err
-	}
-
-	err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName,
-		sourceContainerOnlyName, storagePoolVolumeTypeNameContainer,
-		snapshotName, s.OSDPoolName, targetContainerName,
-		storagePoolVolumeTypeNameContainer, s.UserName, s.OSDDataPoolName)
-	if err != nil {
-		logger.Errorf(`Failed to clone new RBD storage volume for container "%s": %s`, targetContainerName, err)
-		return err
-	}
-
-	// Re-generate the UUID
-	err = s.cephRBDGenerateUUID(project.Prefix(target.Project(), target.Name()), storagePoolVolumeTypeNameContainer)
-	if err != nil {
-		return err
-	}
-
-	// Create mountpoint
-	targetContainerMountPoint := driver.GetContainerMountPoint(target.Project(), s.pool.Name, target.Name())
-	err = driver.CreateContainerMountpoint(targetContainerMountPoint, target.Path(), target.IsPrivileged())
-	if err != nil {
-		return err
-	}
-
-	ourMount, err := target.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer target.StorageStop()
-	}
-
-	err = target.DeferTemplateApply("copy")
-	if err != nil {
-		logger.Errorf(`Failed to apply copy template for container "%s": %s`, target.Name(), err)
-		return err
-	}
-	logger.Debugf(`Applied copy template for container "%s"`, target.Name())
-
-	logger.Debugf(`Created sparse copy of RBD storage volume for container "%s" to "%s" without snapshots`, source.Name(),
-		target.Name())
-	return nil
-}
-
-// copyWithSnapshots creates a non-sparse copy of a container including its
-// snapshots
-// This does not introduce a dependency relation between the source RBD storage
-// volume and the target RBD storage volume.
-func (s *storageCeph) copyWithSnapshots(sourceVolumeName string,
-	targetVolumeName string, sourceParentSnapshot string) error {
-	logger.Debugf(`Creating non-sparse copy of RBD storage volume "%s to "%s"`, sourceVolumeName, targetVolumeName)
-
-	args := []string{
-		"export-diff",
-		"--id", s.UserName,
-		"--cluster", s.ClusterName,
-		sourceVolumeName,
-	}
-
-	if sourceParentSnapshot != "" {
-		args = append(args, "--from-snap", sourceParentSnapshot)
-	}
-
-	// redirect output to stdout
-	args = append(args, "-")
-
-	rbdSendCmd := exec.Command("rbd", args...)
-	rbdRecvCmd := exec.Command(
-		"rbd",
-		"--id", s.UserName,
-		"import-diff",
-		"--cluster", s.ClusterName,
-		"-",
-		targetVolumeName)
-
-	rbdRecvCmd.Stdin, _ = rbdSendCmd.StdoutPipe()
-	rbdRecvCmd.Stdout = os.Stdout
-	rbdRecvCmd.Stderr = os.Stderr
-
-	err := rbdRecvCmd.Start()
-	if err != nil {
-		return err
-	}
-
-	err = rbdSendCmd.Run()
-	if err != nil {
-		return err
-	}
-
-	err = rbdRecvCmd.Wait()
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf(`Created non-sparse copy of RBD storage volume "%s" to "%s"`, sourceVolumeName, targetVolumeName)
-	return nil
-}
-
-// cephContainerDelete deletes the RBD storage volume of a container including
-// any dependencies
-// - This function takes care to delete any RBD storage entities that are marked
-//   as zombie and whose existence is solely dependent on the RBD storage volume
-//   for the container to be deleted.
-// - This function will mark any storage entities of the container to be deleted
-//   as zombies in case any RBD storage entities in the storage pool have a
-//   dependency relation with it.
-// - This function uses a C-style convention to return error or success simply
-//   because it is more elegant and simple than the go way.
-//   The function will return
-//   -1 on error
-//    0 if the RBD storage volume has been deleted
-//    1 if the RBD storage volume has been marked as a zombie
-// - cephContainerDelete in conjunction with cephContainerSnapshotDelete
-//   recurses through an OSD storage pool to find and delete any storage
-//   entities that were kept around because of dependency relations but are not
-//   deletable.
-func cephContainerDelete(clusterName string, poolName string, volumeName string,
-	volumeType string, userName string) int {
-	logEntry := fmt.Sprintf("%s/%s_%s", poolName, volumeType, volumeName)
-
-	snaps, err := cephRBDVolumeListSnapshots(clusterName, poolName,
-		volumeName, volumeType, userName)
-	if err == nil {
-		var zombies int
-		for _, snap := range snaps {
-			logEntry := fmt.Sprintf("%s/%s_%s@%s", poolName,
-				volumeType, volumeName, snap)
-
-			ret := cephContainerSnapshotDelete(clusterName,
-				poolName, volumeName, volumeType, snap, userName)
-			if ret < 0 {
-				logger.Errorf(`Failed to delete RBD storage volume "%s"`, logEntry)
-				return -1
-			} else if ret == 1 {
-				logger.Debugf(`Marked RBD storage volume "%s" as zombie`, logEntry)
-				zombies++
-			} else {
-				logger.Debugf(`Deleted RBD storage volume "%s"`, logEntry)
-			}
-		}
-
-		if zombies > 0 {
-			// unmap
-			err = cephRBDVolumeUnmap(clusterName, poolName,
-				volumeName, volumeType, userName, true)
-			if err != nil {
-				logger.Errorf(`Failed to unmap RBD storage volume "%s": %s`, logEntry, err)
-				return -1
-			}
-			logger.Debugf(`Unmapped RBD storage volume "%s"`, logEntry)
-
-			if strings.HasPrefix(volumeType, "zombie_") {
-				logger.Debugf(`RBD storage volume "%s" already marked as zombie`, logEntry)
-				return 1
-			}
-
-			newVolumeName := fmt.Sprintf("%s_%s", volumeName,
-				uuid.NewRandom().String())
-			err := cephRBDVolumeMarkDeleted(clusterName, poolName,
-				volumeType, volumeName, newVolumeName, userName,
-				"")
-			if err != nil {
-				logger.Errorf(`Failed to mark RBD storage volume "%s" as zombie: %s`, logEntry, err)
-				return -1
-			}
-			logger.Debugf(`Marked RBD storage volume "%s" as zombie`, logEntry)
-
-			return 1
-		}
-	} else {
-		if err != db.ErrNoSuchObject {
-			logger.Errorf(`Failed to retrieve snapshots of RBD storage volume: %s`, err)
-			return -1
-		}
-
-		parent, err := cephRBDVolumeGetParent(clusterName, poolName,
-			volumeName, volumeType, userName)
-		if err == nil {
-			logger.Debugf(`Detected "%s" as parent of RBD storage volume "%s"`, parent, logEntry)
-			_, parentVolumeType, parentVolumeName,
-				parentSnapshotName, err := parseParent(parent)
-			if err != nil {
-				logger.Errorf(`Failed to parse parent "%s" of RBD storage volume "%s"`, parent, logEntry)
-				return -1
-			}
-			logger.Debugf(`Split parent "%s" of RBD storage volume "%s" into volume type "%s", volume name "%s", and snapshot name "%s"`, parent, logEntry, parentVolumeType,
-				parentVolumeName, parentSnapshotName)
-
-			// unmap
-			err = cephRBDVolumeUnmap(clusterName, poolName,
-				volumeName, volumeType, userName, true)
-			if err != nil {
-				logger.Errorf(`Failed to unmap RBD storage volume "%s": %s`, logEntry, err)
-				return -1
-			}
-			logger.Debugf(`Unmapped RBD storage volume "%s"`, logEntry)
-
-			// delete
-			err = cephRBDVolumeDelete(clusterName, poolName,
-				volumeName, volumeType, userName)
-			if err != nil {
-				logger.Errorf(`Failed to delete RBD storage volume "%s": %s`, logEntry, err)
-				return -1
-			}
-			logger.Debugf(`Deleted RBD storage volume "%s"`, logEntry)
-
-			// Only delete the parent snapshot of the container if
-			// it is a zombie. If it is not we know that LXD is
-			// still using it.
-			if strings.HasPrefix(parentVolumeType, "zombie_") ||
-				strings.HasPrefix(parentSnapshotName, "zombie_") {
-				ret := cephContainerSnapshotDelete(clusterName,
-					poolName, parentVolumeName,
-					parentVolumeType, parentSnapshotName,
-					userName)
-				if ret < 0 {
-					logger.Errorf(`Failed to delete snapshot "%s" of RBD storage volume "%s"`, parentSnapshotName, logEntry)
-					return -1
-				}
-				logger.Debugf(`Deleteed snapshot "%s" of RBD storage volume "%s"`, parentSnapshotName, logEntry)
-			}
-
-			return 0
-		} else {
-			if err != db.ErrNoSuchObject {
-				logger.Errorf(`Failed to retrieve parent of RBD storage volume "%s"`, logEntry)
-				return -1
-			}
-			logger.Debugf(`RBD storage volume "%s" does not have parent`, logEntry)
-
-			// unmap
-			err = cephRBDVolumeUnmap(clusterName, poolName,
-				volumeName, volumeType, userName, true)
-			if err != nil {
-				logger.Errorf(`Failed to unmap RBD storage volume "%s": %s`, logEntry, err)
-				return -1
-			}
-			logger.Debugf(`Unmapped RBD storage volume "%s"`, logEntry)
-
-			// delete
-			err = cephRBDVolumeDelete(clusterName, poolName,
-				volumeName, volumeType, userName)
-			if err != nil {
-				logger.Errorf(`Failed to delete RBD storage volume "%s": %s`, logEntry, err)
-				return -1
-			}
-			logger.Debugf(`Deleted RBD storage volume "%s"`, logEntry)
-
-		}
-	}
-
-	return 0
-}
-
-// cephContainerSnapshotDelete deletes an RBD snapshot of a container including
-// any dependencies
-// - This function takes care to delete any RBD storage entities that are marked
-//   as zombie and whose existence is solely dependent on the RBD snapshot for
-//   the container to be deleted.
-// - This function will mark any storage entities of the container to be deleted
-//   as zombies in case any RBD storage entities in the storage pool have a
-//   dependency relation with it.
-// - This function uses a C-style convention to return error or success simply
-//   because it is more elegant and simple than the go way.
-//   The function will return
-//   -1 on error
-//    0 if the RBD storage volume has been deleted
-//    1 if the RBD storage volume has been marked as a zombie
-// - cephContainerSnapshotDelete in conjunction with cephContainerDelete
-//   recurses through an OSD storage pool to find and delete any storage
-//   entities that were kept around because of dependency relations but are not
-//   deletable.
-func cephContainerSnapshotDelete(clusterName string, poolName string,
-	volumeName string, volumeType string, snapshotName string,
-	userName string) int {
-	logImageEntry := fmt.Sprintf("%s/%s_%s", poolName, volumeType, volumeName)
-	logSnapshotEntry := fmt.Sprintf("%s/%s_%s@%s", poolName, volumeType,
-		volumeName, snapshotName)
-
-	clones, err := cephRBDSnapshotListClones(clusterName, poolName,
-		volumeName, volumeType, snapshotName, userName)
-	if err != nil {
-		if err != db.ErrNoSuchObject {
-			logger.Errorf(`Failed to list clones of RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
-			return -1
-		}
-		logger.Debugf(`RBD snapshot "%s" of RBD storage volume "%s" does not have any clones`, logSnapshotEntry, logImageEntry)
-
-		// unprotect
-		err = cephRBDSnapshotUnprotect(clusterName, poolName, volumeName,
-			volumeType, snapshotName, userName)
-		if err != nil {
-			logger.Errorf(`Failed to unprotect RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
-			return -1
-		}
-		logger.Debugf(`Unprotected RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
-
-		// unmap
-		err = cephRBDVolumeSnapshotUnmap(clusterName, poolName,
-			volumeName, volumeType, snapshotName, userName, true)
-		if err != nil {
-			logger.Errorf(`Failed to unmap RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
-			return -1
-		}
-		logger.Debugf(`Unmapped RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
-
-		// delete
-		err = cephRBDSnapshotDelete(clusterName, poolName, volumeName,
-			volumeType, snapshotName, userName)
-		if err != nil {
-			logger.Errorf(`Failed to delete RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
-			return -1
-		}
-		logger.Debugf(`Deleted RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
-
-		// Only delete the parent image if it is a zombie. If it is not
-		// we know that LXD is still using it.
-		if strings.HasPrefix(volumeType, "zombie_") {
-			ret := cephContainerDelete(clusterName, poolName,
-				volumeName, volumeType, userName)
-			if ret < 0 {
-				logger.Errorf(`Failed to delete RBD storage volume "%s"`,
-					logImageEntry)
-				return -1
-			}
-			logger.Debugf(`Deleted RBD storage volume "%s"`, logImageEntry)
-		}
-
-		return 0
-	} else {
-		logger.Debugf(`Detected "%v" as clones of RBD snapshot "%s" of RBD storage volume "%s"`, clones, logSnapshotEntry, logImageEntry)
-
-		canDelete := true
-		for _, clone := range clones {
-			clonePool, cloneType, cloneName, err := parseClone(clone)
-			if err != nil {
-				logger.Errorf(`Failed to parse clone "%s" of RBD snapshot "%s" of RBD storage volume "%s"`, clone, logSnapshotEntry, logImageEntry)
-				return -1
-			}
-			logger.Debugf(`Split clone "%s" of RBD snapshot "%s" of RBD storage volume "%s" into pool name "%s", volume type "%s", and volume name "%s"`, clone, logSnapshotEntry, logImageEntry, clonePool, cloneType, cloneName)
-
-			if !strings.HasPrefix(cloneType, "zombie_") {
-				canDelete = false
-				continue
-			}
-
-			ret := cephContainerDelete(clusterName, clonePool,
-				cloneName, cloneType, userName)
-			if ret < 0 {
-				logger.Errorf(`Failed to delete clone "%s" of RBD snapshot "%s" of RBD storage volume "%s"`, clone, logSnapshotEntry, logImageEntry)
-				return -1
-			} else if ret == 1 {
-				// Only marked as zombie
-				canDelete = false
-			}
-		}
-
-		if canDelete {
-			logger.Debugf(`Deleted all clones of RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
-
-			// unprotect
-			err = cephRBDSnapshotUnprotect(clusterName, poolName,
-				volumeName, volumeType, snapshotName, userName)
-			if err != nil {
-				logger.Errorf(`Failed to unprotect RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
-				return -1
-			}
-			logger.Debugf(`Unprotected RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
-
-			// unmap
-			err = cephRBDVolumeSnapshotUnmap(clusterName, poolName,
-				volumeName, volumeType, snapshotName, userName,
-				true)
-			if err != nil {
-				logger.Errorf(`Failed to unmap RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
-				return -1
-			}
-			logger.Debugf(`Unmapped RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
-
-			// delete
-			err = cephRBDSnapshotDelete(clusterName, poolName,
-				volumeName, volumeType, snapshotName, userName)
-			if err != nil {
-				logger.Errorf(`Failed to delete RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
-				return -1
-			}
-			logger.Debugf(`Deleted RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
-
-			// Only delete the parent image if it is a zombie. If it
-			// is not we know that LXD is still using it.
-			if strings.HasPrefix(volumeType, "zombie_") {
-				ret := cephContainerDelete(clusterName,
-					poolName, volumeName, volumeType,
-					userName)
-				if ret < 0 {
-					logger.Errorf(`Failed to delete RBD storage volume "%s"`, logImageEntry)
-					return -1
-				}
-				logger.Debugf(`Deleted RBD storage volume "%s"`,
-					logImageEntry)
-			}
-		} else {
-			logger.Debugf(`Could not delete all clones of RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
-
-			if strings.HasPrefix(snapshotName, "zombie_") {
-				return 1
-			}
-
-			err := cephRBDVolumeSnapshotUnmap(clusterName, poolName,
-				volumeName, volumeType, snapshotName, userName,
-				true)
-			if err != nil {
-				logger.Errorf(`Failed to unmap RBD snapshot "%s" of RBD storage volume "%s": %s`, logSnapshotEntry, logImageEntry, err)
-				return -1
-			}
-			logger.Debug(`Unmapped RBD snapshot "%s" of RBD storage volume "%s"`, logSnapshotEntry, logImageEntry)
-
-			newSnapshotName := fmt.Sprintf("zombie_%s", snapshotName)
-			logSnapshotNewEntry := fmt.Sprintf("%s/%s_%s@%s",
-				poolName, volumeName, volumeType, newSnapshotName)
-			err = cephRBDVolumeSnapshotRename(clusterName, poolName,
-				volumeName, volumeType, snapshotName,
-				newSnapshotName, userName)
-			if err != nil {
-				logger.Errorf(`Failed to rename RBD snapshot "%s" of RBD storage volume "%s" to %s`, logSnapshotEntry, logImageEntry, logSnapshotNewEntry)
-				return -1
-			}
-			logger.Debugf(`Renamed RBD snapshot "%s" of RBD storage volume "%s" to %s`, logSnapshotEntry, logImageEntry, logSnapshotNewEntry)
-		}
-
-	}
-
-	return 1
-}
-
-// parseParent splits a string describing a RBD storage entity into its
-// components
-// This can be used on strings like
-// <osd-pool-name>/<lxd-specific-prefix>_<rbd-storage-volume>@<rbd-snapshot-name>
-// and will split it into
-// <osd-pool-name>, <rbd-storage-volume>, <lxd-specific-prefix>, <rbdd-snapshot-name>
-func parseParent(parent string) (string, string, string, string, error) {
-	idx := strings.Index(parent, "/")
-	if idx == -1 {
-		return "", "", "", "", fmt.Errorf("Unexpected parsing error")
-	}
-	slider := parent[(idx + 1):]
-	poolName := parent[:idx]
-
-	volumeType := slider
-	idx = strings.Index(slider, "zombie_")
-	if idx == 0 {
-		idx += len("zombie_")
-		volumeType = slider
-		slider = slider[idx:]
-	}
-
-	idxType := strings.Index(slider, "_")
-	if idxType == -1 {
-		return "", "", "", "", fmt.Errorf("Unexpected parsing error")
-	}
-
-	if idx == len("zombie_") {
-		idxType += idx
-	}
-	volumeType = volumeType[:idxType]
-
-	idx = strings.Index(slider, "_")
-	if idx == -1 {
-		return "", "", "", "", fmt.Errorf("Unexpected parsing error")
-	}
-
-	volumeName := slider
-	idx = strings.Index(volumeName, "_")
-	if idx == -1 {
-		return "", "", "", "", fmt.Errorf("Unexpected parsing error")
-	}
-	volumeName = volumeName[(idx + 1):]
-
-	idx = strings.Index(volumeName, "@")
-	if idx == -1 {
-		return "", "", "", "", fmt.Errorf("Unexpected parsing error")
-	}
-	snapshotName := volumeName[(idx + 1):]
-	volumeName = volumeName[:idx]
-
-	return poolName, volumeType, volumeName, snapshotName, nil
-}
-
-// parseClone splits a strings describing an RBD storage volume
-// For example a string like
-// <osd-pool-name>/<lxd-specific-prefix>_<rbd-storage-volume>
-// will be split into
-// <osd-pool-name>, <lxd-specific-prefix>, <rbd-storage-volume>
-func parseClone(clone string) (string, string, string, error) {
-	idx := strings.Index(clone, "/")
-	if idx == -1 {
-		return "", "", "", fmt.Errorf("Unexpected parsing error")
-	}
-	slider := clone[(idx + 1):]
-	poolName := clone[:idx]
-
-	volumeType := slider
-	idx = strings.Index(slider, "zombie_")
-	if idx == 0 {
-		idx += len("zombie_")
-		volumeType = slider
-		slider = slider[idx:]
-	}
-
-	idxType := strings.Index(slider, "_")
-	if idxType == -1 {
-		return "", "", "", fmt.Errorf("Unexpected parsing error")
-	}
-
-	if idx == len("zombie_") {
-		idxType += idx
-	}
-	volumeType = volumeType[:idxType]
-
-	idx = strings.Index(slider, "_")
-	if idx == -1 {
-		return "", "", "", fmt.Errorf("Unexpected parsing error")
-	}
-
-	volumeName := slider
-	idx = strings.Index(volumeName, "_")
-	if idx == -1 {
-		return "", "", "", fmt.Errorf("Unexpected parsing error")
-	}
-	volumeName = volumeName[(idx + 1):]
-
-	return poolName, volumeType, volumeName, nil
-}
-
-// getRBDMappedDevPath looks at sysfs to retrieve the device path
-// "/dev/rbd<idx>" for an RBD image. If it doesn't find it it will map it if
-// told to do so.
-func getRBDMappedDevPath(clusterName string, poolName string, volumeType string,
-	volumeName string, doMap bool, userName string) (string, int) {
-	files, err := ioutil.ReadDir("/sys/devices/rbd")
-	if err != nil {
-		if os.IsNotExist(err) {
-			if doMap {
-				goto mapImage
-			}
-
-			return "", 0
-		}
-
-		return "", -1
-	}
-
-	for _, f := range files {
-		if !f.IsDir() {
-			continue
-		}
-
-		fName := f.Name()
-		idx, err := strconv.ParseUint(fName, 10, 64)
-		if err != nil {
-			continue
-		}
-
-		tmp := fmt.Sprintf("/sys/devices/rbd/%s/pool", fName)
-		contents, err := ioutil.ReadFile(tmp)
-		if err != nil {
-			if os.IsNotExist(err) {
-				continue
-			}
-
-			return "", -1
-		}
-
-		detectedPoolName := strings.TrimSpace(string(contents))
-		if detectedPoolName != poolName {
-			continue
-		}
-
-		tmp = fmt.Sprintf("/sys/devices/rbd/%s/name", fName)
-		contents, err = ioutil.ReadFile(tmp)
-		if err != nil {
-			if os.IsNotExist(err) {
-				continue
-			}
-
-			return "", -1
-		}
-
-		typedVolumeName := fmt.Sprintf("%s_%s", volumeType, volumeName)
-		detectedVolumeName := strings.TrimSpace(string(contents))
-		if detectedVolumeName != typedVolumeName {
-			continue
-		}
-
-		tmp = fmt.Sprintf("/sys/devices/rbd/%s/snap", fName)
-		contents, err = ioutil.ReadFile(tmp)
-		if err != nil {
-			if os.IsNotExist(err) {
-				return fmt.Sprintf("/dev/rbd%d", idx), 1
-			}
-
-			return "", -1
-		}
-
-		detectedSnapName := strings.TrimSpace(string(contents))
-		if detectedSnapName != "-" {
-			continue
-		}
-
-		return fmt.Sprintf("/dev/rbd%d", idx), 1
-	}
-
-	if !doMap {
-		return "", 0
-	}
-
-mapImage:
-	devPath, err := cephRBDVolumeMap(clusterName, poolName,
-		volumeName, volumeType, userName)
-	if err != nil {
-		return "", -1
-	}
-
-	return strings.TrimSpace(devPath), 2
-}
-
-func (s *storageCeph) rbdShrink(path string, size int64, fsType string,
-	fsMntPoint string, volumeType int, volumeName string,
-	data interface{}) error {
-	var err error
-	var msg string
-
-	cleanupFunc, err := shrinkVolumeFilesystem(s, volumeType, fsType, path, fsMntPoint, size, data)
-	if cleanupFunc != nil {
-		defer cleanupFunc()
-	}
-	if err != nil {
-		return err
-	}
-
-	volumeTypeName := ""
-	switch volumeType {
-	case storagePoolVolumeTypeContainer:
-		volumeTypeName = storagePoolVolumeTypeNameContainer
-	case storagePoolVolumeTypeCustom:
-		volumeTypeName = storagePoolVolumeTypeNameCustom
-	default:
-		return fmt.Errorf(`Resizing not implemented for `+
-			`storage volume type %d`, volumeType)
-	}
-	msg, err = shared.TryRunCommand(
-		"rbd",
-		"resize",
-		"--allow-shrink",
-		"--id", s.UserName,
-		"--cluster", s.ClusterName,
-		"--pool", s.OSDPoolName,
-		"--size", fmt.Sprintf("%dM", (size/1024/1024)),
-		fmt.Sprintf("%s_%s", volumeTypeName, volumeName))
-	if err != nil {
-		logger.Errorf(`Could not shrink RBD storage volume "%s": %s`,
-			path, msg)
-		return fmt.Errorf(`Could not shrink RBD storage volume "%s":
-			%s`, path, msg)
-	}
-
-	logger.Debugf("Reduce underlying %s filesystem for LV \"%s\"", fsType, path)
-	return nil
-}
-
-func (s *storageCeph) rbdGrow(path string, size int64, fsType string,
-	fsMntPoint string, volumeType int, volumeName string,
-	data interface{}) error {
-
-	// Find the volume type name
-	volumeTypeName := ""
-	switch volumeType {
-	case storagePoolVolumeTypeContainer:
-		volumeTypeName = storagePoolVolumeTypeNameContainer
-	case storagePoolVolumeTypeCustom:
-		volumeTypeName = storagePoolVolumeTypeNameCustom
-	default:
-		return fmt.Errorf(`Resizing not implemented for storage `+
-			`volume type %d`, volumeType)
-	}
-
-	// Grow the block device
-	_, err := shared.TryRunCommand(
-		"rbd",
-		"resize",
-		"--id", s.UserName,
-		"--cluster", s.ClusterName,
-		"--pool", s.OSDPoolName,
-		"--size", fmt.Sprintf("%dM", (size/1024/1024)),
-		fmt.Sprintf("%s_%s", volumeTypeName, volumeName))
-	if err != nil {
-		logger.Errorf(`Could not extend RBD storage volume "%s": %v`, path, err)
-		return fmt.Errorf(`Could not extend RBD storage volume "%s": %v`, path, err)
-	}
-
-	// Mount the filesystem
-	switch volumeType {
-	case storagePoolVolumeTypeContainer:
-		c := data.(instance.Instance)
-		ourMount, err := c.StorageStart()
-		if err != nil {
-			return err
-		}
-
-		if ourMount {
-			defer c.StorageStop()
-		}
-	case storagePoolVolumeTypeCustom:
-		ourMount, err := s.StoragePoolVolumeMount()
-		if err != nil {
-			return err
-		}
-
-		if ourMount {
-			defer s.StoragePoolVolumeUmount()
-		}
-	}
-
-	// Grow the filesystem
-	return driver.GrowFileSystem(fsType, path, fsMntPoint)
-}
-
-// copyWithSnapshots creates a non-sparse copy of a container including its
-// snapshots
-// This does not introduce a dependency relation between the source RBD storage
-// volume and the target RBD storage volume.
-func (s *storageCeph) cephRBDVolumeDumpToFile(sourceVolumeName string, file string) error {
-	logger.Debugf(`Dumping RBD storage volume "%s" to "%s"`, sourceVolumeName, file)
-
-	args := []string{
-		"export",
-		"--id", s.UserName,
-		"--cluster", s.ClusterName,
-		sourceVolumeName,
-		file,
-	}
-
-	rbdSendCmd := exec.Command("rbd", args...)
-	err := rbdSendCmd.Run()
-	if err != nil {
-		return err
-	}
-
-	logger.Debugf(`Dumped RBD storage volume "%s" to "%s"`, sourceVolumeName, file)
-	return nil
-}
-
-// cephRBDVolumeBackupCreate creates a backup of a container or snapshot.
-func (s *storageCeph) cephRBDVolumeBackupCreate(tmpPath string, backup backup.Backup, source instance.Instance) error {
-	sourceIsSnapshot := source.IsSnapshot()
-	sourceContainerName := source.Name()
-	sourceContainerOnlyName := project.Prefix(source.Project(), sourceContainerName)
-	sourceSnapshotOnlyName := ""
-
-	// Prepare for rsync
-	rsync := func(oldPath string, newPath string, bwlimit string) error {
-		output, err := rsync.LocalCopy(oldPath, newPath, bwlimit, true)
-		if err != nil {
-			return fmt.Errorf("Failed to rsync: %s: %s", string(output), err)
-		}
-
-		return nil
-	}
-
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-	// Create a temporary snapshot
-	snapshotName := fmt.Sprintf("zombie_snapshot_%s", uuid.NewRandom().String())
-	if sourceIsSnapshot {
-		sourceContainerOnlyName, sourceSnapshotOnlyName, _ = shared.InstanceGetParentAndSnapshotName(sourceContainerName)
-		sourceContainerOnlyName = project.Prefix(source.Project(), sourceContainerOnlyName)
-		snapshotName = fmt.Sprintf("snapshot_%s", sourceSnapshotOnlyName)
-	} else {
-		// This is costly but we need to ensure that all cached data has
-		// been committed to disk. If we don't then the rbd snapshot of
-		// the underlying filesystem can be inconsistent or - worst case
-		// - empty.
-		unix.Sync()
-
-		// create snapshot
-		err := cephRBDSnapshotCreate(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
-		if err != nil {
-			return err
-		}
-		defer cephRBDSnapshotDelete(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
-	}
-
-	// Protect volume so we can create clones of it
-	err := cephRBDSnapshotProtect(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
-	if err != nil {
-		return err
-	}
-	defer cephRBDSnapshotUnprotect(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.UserName)
-
-	// Create a new volume from the snapshot
-	cloneName := uuid.NewRandom().String()
-	err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.OSDPoolName, cloneName, "backup", s.UserName, s.OSDDataPoolName)
-	if err != nil {
-		return err
-	}
-	defer cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName, cloneName, "backup", s.UserName)
-
-	// Map the new volume
-	RBDDevPath, err := cephRBDVolumeMap(s.ClusterName, s.OSDPoolName, cloneName, "backup", s.UserName)
-	if err != nil {
-		return err
-	}
-	defer cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, cloneName, "backup", s.UserName, true)
-
-	// Generate a new UUID if needed
-	RBDFilesystem := s.getRBDFilesystem()
-	msg, err := driver.FSGenerateNewUUID(RBDFilesystem, RBDDevPath)
-	if err != nil {
-		logger.Errorf("Failed to create new UUID for filesystem \"%s\": %s: %s", RBDFilesystem, msg, err)
-		return err
-	}
-
-	// Create a temporary mountpoing
-	tmpContainerMntPoint, err := ioutil.TempDir("", "lxd_backup_")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpContainerMntPoint)
-
-	err = os.Chmod(tmpContainerMntPoint, 0100)
-	if err != nil {
-		return err
-	}
-
-	// Mount the volume
-	mountFlags, mountOptions := resolveMountOptions(s.getRBDMountOptions())
-	err = storageDrivers.TryMount(RBDDevPath, tmpContainerMntPoint, RBDFilesystem, mountFlags, mountOptions)
-	if err != nil {
-		logger.Errorf("Failed to mount RBD device %s onto %s: %s", RBDDevPath, tmpContainerMntPoint, err)
-		return err
-	}
-	logger.Debugf("Mounted RBD device %s onto %s", RBDDevPath, tmpContainerMntPoint)
-	defer storageDrivers.TryUnmount(tmpContainerMntPoint, unix.MNT_DETACH)
-
-	// Figure out the target name
-	targetName := sourceContainerName
-	if sourceIsSnapshot {
-		_, targetName, _ = shared.InstanceGetParentAndSnapshotName(sourceContainerName)
-	}
-
-	// Create the path for the backup.
-	targetBackupMntPoint := fmt.Sprintf("%s/container", tmpPath)
-	if sourceIsSnapshot {
-		targetBackupMntPoint = fmt.Sprintf("%s/snapshots/%s", tmpPath, targetName)
-	}
-
-	err = os.MkdirAll(targetBackupMntPoint, 0711)
-	if err != nil {
-		return err
-	}
-
-	err = rsync(tmpContainerMntPoint, targetBackupMntPoint, bwlimit)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageCeph) doContainerCreate(projectName, name string, privileged bool) error {
-	logger.Debugf(`Creating RBD storage volume for container "%s" on storage pool "%s"`, name, s.pool.Name)
-
-	revert := true
-
-	// get size
-	RBDSize, err := s.getRBDSize()
-	if err != nil {
-		logger.Errorf(`Failed to retrieve size of RBD storage volume for container "%s" on storage pool "%s": %s`, name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Retrieved size "%s" of RBD storage volume for container "%s" on storage pool "%s"`, RBDSize, name, s.pool.Name)
-
-	// create volume
-	volumeName := project.Prefix(projectName, name)
-	err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, volumeName, storagePoolVolumeTypeNameContainer, RBDSize, s.UserName, s.OSDDataPoolName)
-	if err != nil {
-		logger.Errorf(`Failed to create RBD storage volume for container "%s" on storage pool "%s": %s`, name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Created RBD storage volume for container "%s" on storage pool "%s"`, name, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName, volumeName, storagePoolVolumeTypeNameContainer, s.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to delete RBD storage volume for container "%s" on storage pool "%s": %s`, name, s.pool.Name, err)
-		}
-	}()
-
-	RBDDevPath, err := cephRBDVolumeMap(s.ClusterName, s.OSDPoolName, volumeName, storagePoolVolumeTypeNameContainer, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to map RBD storage volume for container "%s" on storage pool "%s": %s`, name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Mapped RBD storage volume for container "%s" on storage pool "%s"`, name, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, volumeName, storagePoolVolumeTypeNameContainer, s.UserName, true)
-		if err != nil {
-			logger.Warnf(`Failed to unmap RBD storage volume for container "%s" on storage pool "%s": %s`, name, s.pool.Name, err)
-		}
-	}()
-
-	// get filesystem
-	RBDFilesystem := s.getRBDFilesystem()
-	output, err := makeFSType(RBDDevPath, RBDFilesystem, nil)
-	if err != nil {
-		logger.Errorf(`Failed to create filesystem type "%s" on device path "%s" for RBD storage volume for container "%s" on storage pool "%s": %v (%s)`, RBDFilesystem, RBDDevPath, name, s.pool.Name, err, output)
-		return err
-	}
-	logger.Debugf(`Created filesystem type "%s" on device path "%s" for RBD storage volume for container "%s" on storage pool "%s"`, RBDFilesystem, RBDDevPath, name, s.pool.Name)
-
-	containerPath := shared.VarPath("containers", project.Prefix(projectName, name))
-	containerMntPoint := driver.GetContainerMountPoint(projectName, s.pool.Name, name)
-	err = driver.CreateContainerMountpoint(containerMntPoint, containerPath, privileged)
-	if err != nil {
-		logger.Errorf(`Failed to create mountpoint "%s" for RBD storage volume for container "%s" on storage pool "%s": %s"`, containerMntPoint, name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Created mountpoint "%s" for RBD storage volume for container "%s" on storage pool "%s""`, containerMntPoint, name, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := os.Remove(containerMntPoint)
-		if err != nil {
-			logger.Warnf(`Failed to delete mountpoint "%s" for RBD storage volume for container "%s" on storage pool "%s": %s"`, containerMntPoint, name, s.pool.Name, err)
-		}
-	}()
-
-	logger.Debugf(`Created RBD storage volume for container "%s" on storage pool "%s"`, name, s.pool.Name)
-
-	revert = false
-
-	return nil
-}
-
-func (s *storageCeph) doContainerMount(projectName string, name string) (bool, error) {
-	RBDFilesystem := s.getRBDFilesystem()
-	containerMntPoint := driver.GetContainerMountPoint(projectName, s.pool.Name, name)
-	if shared.IsSnapshot(name) {
-		containerMntPoint = driver.GetSnapshotMountPoint(projectName, s.pool.Name, name)
-	}
-
-	containerMountLockID := getContainerMountLockID(s.pool.Name, name)
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerMountLockID]; ok {
-		lxdStorageMapLock.Unlock()
-		if _, ok := <-waitChannel; ok {
-			logger.Warnf("Received value over semaphore, this should not have happened")
-		}
-		// Give the benefit of the doubt and assume that the other
-		// thread actually succeeded in mounting the storage volume.
-		logger.Debugf("RBD storage volume for container \"%s\" on storage pool \"%s\" appears to be already mounted", s.volume.Name, s.pool.Name)
-		return false, nil
-	}
-
-	lxdStorageOngoingOperationMap[containerMountLockID] = make(chan bool)
-	lxdStorageMapLock.Unlock()
-
-	var ret int
-	var mounterr error
-	ourMount := false
-	RBDDevPath := ""
-	if !shared.IsMountPoint(containerMntPoint) {
-		volumeName := project.Prefix(projectName, name)
-		RBDDevPath, ret = getRBDMappedDevPath(s.ClusterName,
-			s.OSDPoolName, storagePoolVolumeTypeNameContainer,
-			volumeName, true, s.UserName)
-		if ret >= 0 {
-			mountFlags, mountOptions := resolveMountOptions(s.getRBDMountOptions())
-			mounterr = storageDrivers.TryMount(RBDDevPath, containerMntPoint,
-				RBDFilesystem, mountFlags, mountOptions)
-			ourMount = true
-		}
-	}
-
-	lxdStorageMapLock.Lock()
-	if waitChannel, ok := lxdStorageOngoingOperationMap[containerMountLockID]; ok {
-		close(waitChannel)
-		delete(lxdStorageOngoingOperationMap, containerMountLockID)
-	}
-	lxdStorageMapLock.Unlock()
-
-	if mounterr != nil || ret < 0 {
-		logger.Errorf("Failed to mount RBD storage volume for container \"%s\": %s", s.volume.Name, mounterr)
-		return false, mounterr
-	}
-
-	return ourMount, nil
-}
-
-func (s *storageCeph) doContainerSnapshotCreate(projectName, targetName string, sourceName string) error {
-	logger.Debugf(`Creating RBD storage volume for snapshot "%s" on storage pool "%s"`, targetName, s.pool.Name)
-
-	revert := true
-
-	_, targetSnapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(targetName)
-	targetSnapshotName := fmt.Sprintf("snapshot_%s", targetSnapshotOnlyName)
-	err := cephRBDSnapshotCreate(s.ClusterName, s.OSDPoolName,
-		project.Prefix(projectName, sourceName), storagePoolVolumeTypeNameContainer,
-		targetSnapshotName, s.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to create snapshot for RBD storage volume for snapshot "%s" on storage pool "%s": %s`, targetName, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Created snapshot for RBD storage volume for image "%s" on storage pool "%s"`, targetName, s.pool.Name)
-
-	defer func() {
-		if !revert {
-			return
-		}
-
-		err := cephRBDSnapshotDelete(s.ClusterName, s.OSDPoolName,
-			sourceName, storagePoolVolumeTypeNameContainer,
-			targetSnapshotName, s.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to delete RBD container storage for snapshot "%s" of container "%s"`, targetSnapshotOnlyName, sourceName)
-		}
-	}()
-
-	targetContainerMntPoint := driver.GetSnapshotMountPoint(projectName, s.pool.Name, targetName)
-	sourceOnlyName, _, _ := shared.InstanceGetParentAndSnapshotName(sourceName)
-	snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "containers-snapshots", project.Prefix(projectName, sourceOnlyName))
-	snapshotMntPointSymlink := shared.VarPath("snapshots", project.Prefix(projectName, sourceOnlyName))
-	err = driver.CreateSnapshotMountpoint(targetContainerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
-	if err != nil {
-		logger.Errorf(`Failed to create mountpoint "%s", snapshot symlink target "%s", snapshot mountpoint symlink"%s" for RBD storage volume "%s" on storage pool "%s": %s`, targetContainerMntPoint, snapshotMntPointSymlinkTarget,
-			snapshotMntPointSymlink, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-	logger.Debugf(`Created mountpoint "%s", snapshot symlink target "%s", snapshot mountpoint symlink"%s" for RBD storage volume "%s" on storage pool "%s"`, targetContainerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink, s.volume.Name, s.pool.Name)
-
-	logger.Debugf(`Created RBD storage volume for snapshot "%s" on storage pool "%s"`, targetName, s.pool.Name)
-
-	revert = false
-
-	return nil
-}
-
-func (s *storageCeph) doCrossPoolVolumeCopy(source *api.StorageVolumeSource) error {
-	// setup storage for the source volume
-	srcStorage, err := storagePoolVolumeInit(s.s, "default", source.Pool, source.Name, storagePoolVolumeTypeCustom)
-	if err != nil {
-		logger.Errorf("Failed to initialize CEPH storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	ourMount, err := srcStorage.StoragePoolMount()
-	if err != nil {
-		logger.Errorf("Failed to mount CEPH storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	if ourMount {
-		defer srcStorage.StoragePoolUmount()
-	}
-
-	snapshots, err := driver.VolumeSnapshotsGet(s.s, source.Pool, source.Name, storagePoolVolumeTypeCustom)
-	if err != nil {
-		return err
-	}
-
-	err = s.StoragePoolVolumeCreate()
-	if err != nil {
-		logger.Errorf("Failed to create RBD storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	ourMount, err = s.StoragePoolVolumeMount()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer s.StoragePoolVolumeUmount()
-	}
-
-	dstVolumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	bwlimit := s.pool.Config["rsync.bwlimit"]
-
-	if !source.VolumeOnly {
-		for _, snap := range snapshots {
-			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name)
-			srcSnapshotMntPoint := driver.GetStoragePoolVolumeSnapshotMountPoint(source.Pool, snap.Name)
-
-			_, err = rsync.LocalCopy(srcSnapshotMntPoint, dstVolumeMntPoint, bwlimit, true)
-			if err != nil {
-				return err
-			}
-
-			err = s.StoragePoolVolumeSnapshotCreate(&api.StorageVolumeSnapshotsPost{Name: fmt.Sprintf("%s/%s", s.volume.Name, snapOnlyName)})
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	var srcVolumeMntPoint string
-
-	if shared.IsSnapshot(source.Name) {
-		srcVolumeMntPoint = driver.GetStoragePoolVolumeSnapshotMountPoint(source.Pool, source.Name)
-	} else {
-		srcVolumeMntPoint = driver.GetStoragePoolVolumeMountPoint(source.Pool, source.Name)
-	}
-
-	_, err = rsync.LocalCopy(srcVolumeMntPoint, dstVolumeMntPoint, bwlimit, true)
-	if err != nil {
-		os.RemoveAll(dstVolumeMntPoint)
-		logger.Errorf("Failed to rsync into RBD storage volume \"%s\" on storage pool \"%s\": %s", s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageCeph) copyVolumeWithoutSnapshotsFull(source *api.StorageVolumeSource) error {
-	var oldVolumeName string
-
-	isSnapshot := shared.IsSnapshot(source.Name)
-
-	if isSnapshot {
-		_, srcSnapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(source.Name)
-		oldVolumeName = fmt.Sprintf("%s/snapshot_%s", s.OSDPoolName, srcSnapshotOnlyName)
-	} else {
-		oldVolumeName = fmt.Sprintf("%s/custom_%s", s.OSDPoolName, source.Name)
-	}
-
-	newVolumeName := fmt.Sprintf("%s/custom_%s", s.OSDPoolName, s.volume.Name)
-
-	err := cephRBDVolumeCopy(s.ClusterName, oldVolumeName, newVolumeName, s.UserName)
-	if err != nil {
-		logger.Errorf("Failed to create non-sparse copy of RBD storage volume \"%s\" on storage pool \"%s\": %s", source.Name, source.Pool, err)
-		return err
-	}
-
-	// Re-generate the UUID
-	err = s.cephRBDGenerateUUID(s.volume.Name, storagePoolVolumeTypeNameCustom)
-	if err != nil {
-		return err
-	}
-
-	// Create the mountpoint
-	volumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	err = os.MkdirAll(volumeMntPoint, 0711)
-	if err != nil {
-		logger.Errorf("Failed to create mountpoint \"%s\" for RBD storage volume \"%s\" on storage pool \"%s\": %s", volumeMntPoint, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageCeph) copyVolumeWithoutSnapshotsSparse(source *api.StorageVolumeSource) error {
-	sourceOnlyName, snapshotOnlyName, isSnapshot := shared.InstanceGetParentAndSnapshotName(source.Name)
-
-	if isSnapshot {
-		snapshotOnlyName = fmt.Sprintf("snapshot_%s", snapshotOnlyName)
-	} else {
-		// create sparse copy
-		snapshotOnlyName = uuid.NewRandom().String()
-
-		// create snapshot of original volume
-		err := cephRBDSnapshotCreate(s.ClusterName, s.OSDPoolName, sourceOnlyName, storagePoolVolumeTypeNameCustom, snapshotOnlyName, s.UserName)
-		if err != nil {
-			logger.Errorf("Failed to create snapshot of RBD storage volume \"%s\" on storage pool \"%s\": %s", sourceOnlyName, source.Pool, err)
-			return err
-		}
-	}
-
-	// protect volume so we can create clones of it
-	err := cephRBDSnapshotProtect(s.ClusterName, s.OSDPoolName, sourceOnlyName, storagePoolVolumeTypeNameCustom, snapshotOnlyName, s.UserName)
-	if err != nil {
-		logger.Errorf("Failed to protect snapshot for RBD storage volume \"%s\" on storage pool \"%s\": %s", sourceOnlyName, s.pool.Name, err)
-		return err
-	}
-
-	// create new clone
-	err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, sourceOnlyName, storagePoolVolumeTypeNameCustom, snapshotOnlyName, s.OSDPoolName, s.volume.Name, storagePoolVolumeTypeNameCustom, s.UserName, s.OSDDataPoolName)
-	if err != nil {
-		logger.Errorf("Failed to clone RBD storage volume \"%s\" on storage pool \"%s\": %s", source.Name, source.Pool, err)
-		return err
-	}
-
-	// Re-generate the UUID
-	err = s.cephRBDGenerateUUID(s.volume.Name, storagePoolVolumeTypeNameCustom)
-	if err != nil {
-		return err
-	}
-
-	// Create the mountpoint
-	volumeMntPoint := driver.GetStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-	err = os.MkdirAll(volumeMntPoint, 0711)
-	if err != nil {
-		logger.Errorf("Failed to create mountpoint \"%s\" for RBD storage volume \"%s\" on storage pool \"%s\": %s", volumeMntPoint, s.volume.Name, s.pool.Name, err)
-		return err
-	}
-
-	return nil
-}
-
-// cephRBDGenerateUUID regenerates the XFS/btrfs UUID as needed
-func (s *storageCeph) cephRBDGenerateUUID(volumeName string, volumeType string) error {
-	// Map the RBD volume
-	RBDDevPath, err := cephRBDVolumeMap(s.ClusterName, s.OSDPoolName, volumeName, volumeType, s.UserName)
-	if err != nil {
-		return err
-	}
-	defer cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, volumeName, volumeType, s.UserName, true)
-
-	// Update the UUID
-	msg, err := driver.FSGenerateNewUUID(s.getRBDFilesystem(), RBDDevPath)
-	if err != nil {
-		return fmt.Errorf("Failed to regenerate UUID for '%v': %v: %v", volumeName, err, msg)
-	}
-
-	return nil
-}
diff --git a/lxd/storage_migration_ceph.go b/lxd/storage_migration_ceph.go
deleted file mode 100644
index c590569c56..0000000000
--- a/lxd/storage_migration_ceph.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os/exec"
-
-	"github.com/gorilla/websocket"
-	"github.com/pborman/uuid"
-
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/logger"
-)
-
-type rbdMigrationSourceDriver struct {
-	container        instance.Instance
-	snapshots        []instance.Instance
-	rbdSnapshotNames []string
-	ceph             *storageCeph
-	runningSnapName  string
-	stoppedSnapName  string
-}
-
-func (s *rbdMigrationSourceDriver) Snapshots() []instance.Instance {
-	return s.snapshots
-}
-
-func (s *rbdMigrationSourceDriver) Cleanup() {
-	containerName := s.container.Name()
-
-	if s.stoppedSnapName != "" {
-		err := cephRBDSnapshotDelete(s.ceph.ClusterName, s.ceph.OSDPoolName,
-			project.Prefix(s.container.Project(), containerName), storagePoolVolumeTypeNameContainer,
-			s.stoppedSnapName, s.ceph.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to delete RBD snapshot "%s" of container "%s"`, s.stoppedSnapName, containerName)
-		}
-	}
-
-	if s.runningSnapName != "" {
-		err := cephRBDSnapshotDelete(s.ceph.ClusterName, s.ceph.OSDPoolName,
-			project.Prefix(s.container.Project(), containerName), storagePoolVolumeTypeNameContainer,
-			s.runningSnapName, s.ceph.UserName)
-		if err != nil {
-			logger.Warnf(`Failed to delete RBD snapshot "%s" of container "%s"`, s.runningSnapName, containerName)
-		}
-	}
-}
-
-func (s *rbdMigrationSourceDriver) SendAfterCheckpoint(conn *websocket.Conn, bwlimit string) error {
-	containerName := s.container.Name()
-	s.stoppedSnapName = fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())
-	err := cephRBDSnapshotCreate(s.ceph.ClusterName, s.ceph.OSDPoolName,
-		project.Prefix(s.container.Project(), containerName), storagePoolVolumeTypeNameContainer,
-		s.stoppedSnapName, s.ceph.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to create snapshot "%s" for RBD storage volume for image "%s" on storage pool "%s": %s`, s.stoppedSnapName, containerName, s.ceph.pool.Name, err)
-		return err
-	}
-
-	cur := fmt.Sprintf("%s/container_%s@%s", s.ceph.OSDPoolName,
-		project.Prefix(s.container.Project(), containerName), s.stoppedSnapName)
-	err = s.rbdSend(conn, cur, s.runningSnapName, nil)
-	if err != nil {
-		logger.Errorf(`Failed to send exported diff of RBD storage volume "%s" from snapshot "%s": %s`, cur, s.runningSnapName, err)
-		return err
-	}
-	logger.Debugf(`Sent exported diff of RBD storage volume "%s" from snapshot "%s"`, cur, s.stoppedSnapName)
-
-	return nil
-}
-
-func (s *rbdMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn,
-	op *operations.Operation, bwlimit string, containerOnly bool) error {
-	containerName := s.container.Name()
-	if s.container.IsSnapshot() {
-		// ContainerSnapshotStart() will create the clone that is
-		// referenced by sendName here.
-		containerOnlyName, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(containerName)
-		sendName := fmt.Sprintf(
-			"%s/snapshots_%s_%s_start_clone",
-			s.ceph.OSDPoolName,
-			containerOnlyName,
-			snapOnlyName)
-		wrapper := migration.ProgressReader(op, "fs_progress", containerName)
-
-		err := s.rbdSend(conn, sendName, "", wrapper)
-		if err != nil {
-			logger.Errorf(`Failed to send RBD storage volume "%s": %s`, sendName, err)
-			return err
-		}
-		logger.Debugf(`Sent RBD storage volume "%s"`, sendName)
-
-		return nil
-	}
-
-	lastSnap := ""
-	if !containerOnly {
-		for i, snap := range s.rbdSnapshotNames {
-			prev := ""
-			if i > 0 {
-				prev = s.rbdSnapshotNames[i-1]
-			}
-
-			lastSnap = snap
-
-			sendSnapName := fmt.Sprintf(
-				"%s/container_%s@%s",
-				s.ceph.OSDPoolName,
-				project.Prefix(s.container.Project(), containerName),
-				snap)
-
-			wrapper := migration.ProgressReader(op, "fs_progress", snap)
-
-			err := s.rbdSend(
-				conn,
-				sendSnapName,
-				prev,
-				wrapper)
-			if err != nil {
-				logger.Errorf(`Failed to send exported diff of RBD storage volume "%s" from snapshot "%s": %s`, sendSnapName, prev, err)
-				return err
-			}
-			logger.Debugf(`Sent exported diff of RBD storage volume "%s" from snapshot "%s"`, sendSnapName, prev)
-		}
-	}
-
-	s.runningSnapName = fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())
-	err := cephRBDSnapshotCreate(s.ceph.ClusterName, s.ceph.OSDPoolName,
-		project.Prefix(s.container.Project(), containerName), storagePoolVolumeTypeNameContainer,
-		s.runningSnapName, s.ceph.UserName)
-	if err != nil {
-		logger.Errorf(`Failed to create snapshot "%s" for RBD storage volume for image "%s" on storage pool "%s": %s`, s.runningSnapName, containerName, s.ceph.pool.Name, err)
-		return err
-	}
-
-	cur := fmt.Sprintf("%s/container_%s@%s", s.ceph.OSDPoolName,
-		project.Prefix(s.container.Project(), containerName), s.runningSnapName)
-	wrapper := migration.ProgressReader(op, "fs_progress", containerName)
-	err = s.rbdSend(conn, cur, lastSnap, wrapper)
-	if err != nil {
-		logger.Errorf(`Failed to send exported diff of RBD storage volume "%s" from snapshot "%s": %s`, s.runningSnapName, lastSnap, err)
-		return err
-	}
-	logger.Debugf(`Sent exported diff of RBD storage volume "%s" from snapshot "%s"`, s.runningSnapName, lastSnap)
-
-	return nil
-}
-
-func (s *rbdMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operations.Operation, bwlimit string, storage storage, volumeOnly bool) error {
-	msg := fmt.Sprintf("Function not implemented")
-	logger.Errorf(msg)
-	return fmt.Errorf(msg)
-}
-
-// Let's say we want to send the a container "a" including snapshots "snap0" and
-// "snap1" on storage pool "pool1" from LXD "l1" to LXD "l2" on storage pool
-// "pool2":
-//
-// The pool layout on "l1" would be:
-//	pool1/container_a
-//	pool1/container_a at snapshot_snap0
-//	pool1/container_a at snapshot_snap1
-//
-// Then we need to send:
-//	rbd export-diff pool1/container_a at snapshot_snap0 - | rbd import-diff - pool2/container_a
-// (Note that pool2/container_a must have been created by the receiving LXD
-// instance before.)
-//	rbd export-diff pool1/container_a at snapshot_snap1 --from-snap snapshot_snap0 - | rbd import-diff - pool2/container_a
-//	rbd export-diff pool1/container_a --from-snap snapshot_snap1 - | rbd import-diff - pool2/container_a
-func (s *rbdMigrationSourceDriver) rbdSend(conn *websocket.Conn,
-	volumeName string,
-	volumeParentName string,
-	readWrapper func(io.ReadCloser) io.ReadCloser) error {
-	args := []string{
-		"export-diff",
-		"--cluster", s.ceph.ClusterName,
-		volumeName,
-	}
-
-	if volumeParentName != "" {
-		args = append(args, "--from-snap", volumeParentName)
-	}
-
-	// redirect output to stdout
-	args = append(args, "-")
-
-	cmd := exec.Command("rbd", args...)
-
-	stdout, err := cmd.StdoutPipe()
-	if err != nil {
-		return err
-	}
-
-	readPipe := io.ReadCloser(stdout)
-	if readWrapper != nil {
-		readPipe = readWrapper(stdout)
-	}
-
-	stderr, err := cmd.StderrPipe()
-	if err != nil {
-		return err
-	}
-
-	err = cmd.Start()
-	if err != nil {
-		return err
-	}
-
-	<-shared.WebsocketSendStream(conn, readPipe, 4*1024*1024)
-
-	output, err := ioutil.ReadAll(stderr)
-	if err != nil {
-		logger.Debugf(`Failed to read stderr output from "rbd export-diff": %s`, err)
-	}
-
-	err = cmd.Wait()
-	if err != nil {
-		logger.Errorf(`Failed to perform "rbd export-diff": %s`, string(output))
-	}
-
-	return err
-}
diff --git a/lxd/storage_mock.go b/lxd/storage_mock.go
deleted file mode 100644
index 08fee51c8e..0000000000
--- a/lxd/storage_mock.go
+++ /dev/null
@@ -1,263 +0,0 @@
-package main
-
-import (
-	"io"
-
-	"github.com/gorilla/websocket"
-
-	"github.com/lxc/lxd/lxd/backup"
-	"github.com/lxc/lxd/lxd/instance"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/state"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/ioprogress"
-	"github.com/lxc/lxd/shared/logger"
-)
-
-type storageMock struct {
-	storageShared
-}
-
-func (s *storageMock) StorageCoreInit() error {
-	s.sType = storageTypeMock
-	typeName, err := storageTypeToString(s.sType)
-	if err != nil {
-		return err
-	}
-	s.sTypeName = typeName
-
-	return nil
-}
-
-func (s *storageMock) StoragePoolInit() error {
-	err := s.StorageCoreInit()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageMock) StoragePoolCheck() error {
-	logger.Debugf("Checking MOCK storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageMock) StoragePoolCreate() error {
-	logger.Infof("Creating MOCK storage pool \"%s\"", s.pool.Name)
-	logger.Infof("Created MOCK storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageMock) StoragePoolDelete() error {
-	logger.Infof("Deleting MOCK storage pool \"%s\"", s.pool.Name)
-	logger.Infof("Deleted MOCK storage pool \"%s\"", s.pool.Name)
-	return nil
-}
-
-func (s *storageMock) StoragePoolMount() (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) StoragePoolUmount() (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) GetStoragePoolWritable() api.StoragePoolPut {
-	return s.pool.StoragePoolPut
-}
-
-func (s *storageMock) GetStoragePoolVolumeWritable() api.StorageVolumePut {
-	return api.StorageVolumePut{}
-}
-
-func (s *storageMock) SetStoragePoolWritable(writable *api.StoragePoolPut) {
-	s.pool.StoragePoolPut = *writable
-}
-
-func (s *storageMock) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
-	s.volume.StorageVolumePut = *writable
-}
-
-func (s *storageMock) GetContainerPoolInfo() (int64, string, string) {
-	return s.poolID, s.pool.Name, s.pool.Name
-}
-
-func (s *storageMock) StoragePoolVolumeCreate() error {
-	return nil
-}
-
-func (s *storageMock) StoragePoolVolumeDelete() error {
-	return nil
-}
-
-func (s *storageMock) StoragePoolVolumeMount() (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) StoragePoolVolumeUmount() (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) StoragePoolVolumeUpdate(writable *api.StorageVolumePut, changedConfig []string) error {
-	return nil
-}
-
-func (s *storageMock) StoragePoolVolumeRename(newName string) error {
-	return nil
-}
-
-func (s *storageMock) StoragePoolUpdate(writable *api.StoragePoolPut, changedConfig []string) error {
-	return nil
-}
-
-func (s *storageMock) ContainerStorageReady(container instance.Instance) bool {
-	return true
-}
-
-func (s *storageMock) ContainerCreate(container instance.Instance) error {
-	return nil
-}
-
-func (s *storageMock) ContainerCreateFromImage(container instance.Instance, imageFingerprint string, tracker *ioprogress.ProgressTracker) error {
-	return nil
-}
-
-func (s *storageMock) ContainerDelete(container instance.Instance) error {
-	return nil
-}
-
-func (s *storageMock) ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error {
-	return nil
-}
-
-func (s *storageMock) ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error {
-	return nil
-}
-
-func (s *storageMock) ContainerMount(c instance.Instance) (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) ContainerUmount(c instance.Instance, path string) (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) ContainerRename(container instance.Instance, newName string) error {
-	return nil
-}
-
-func (s *storageMock) ContainerRestore(container instance.Instance, sourceContainer instance.Instance) error {
-	return nil
-}
-
-func (s *storageMock) ContainerGetUsage(container instance.Instance) (int64, error) {
-	return 0, nil
-}
-func (s *storageMock) ContainerSnapshotCreate(snapshotContainer instance.Instance, sourceContainer instance.Instance) error {
-	return nil
-}
-func (s *storageMock) ContainerSnapshotDelete(snapshotContainer instance.Instance) error {
-	return nil
-}
-
-func (s *storageMock) ContainerSnapshotRename(snapshotContainer instance.Instance, newName string) error {
-	return nil
-}
-
-func (s *storageMock) ContainerSnapshotStart(container instance.Instance) (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) ContainerSnapshotStop(container instance.Instance) (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) ContainerSnapshotCreateEmpty(snapshotContainer instance.Instance) error {
-	return nil
-}
-
-func (s *storageMock) ContainerBackupCreate(path string, backup backup.Backup, sourceContainer instance.Instance) error {
-	return nil
-}
-
-func (s *storageMock) ContainerBackupLoad(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
-	return nil
-}
-
-func (s *storageMock) ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error {
-	return nil
-}
-
-func (s *storageMock) ImageDelete(fingerprint string) error {
-	return nil
-}
-
-func (s *storageMock) ImageMount(fingerprint string) (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) ImageUmount(fingerprint string) (bool, error) {
-	return true, nil
-}
-
-func (s *storageMock) MigrationType() migration.MigrationFSType {
-	return migration.MigrationFSType_RSYNC
-}
-
-func (s *storageMock) PreservesInodes() bool {
-	return false
-}
-
-func (s *storageMock) MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return nil, nil
-}
-
-func (s *storageMock) MigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	return nil
-}
-
-func (s *storageMock) StorageEntitySetQuota(volumeType int, size int64, data interface{}) error {
-	return nil
-}
-
-func (s *storageMock) StoragePoolResources() (*api.ResourcesStoragePool, error) {
-	return &api.ResourcesStoragePool{}, nil
-}
-
-func (s *storageMock) StoragePoolVolumeCopy(source *api.StorageVolumeSource) error {
-	return nil
-}
-
-func (s *storageMock) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return nil, nil
-}
-
-func (s *storageMock) StorageMigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	return nil
-}
-
-func (s *storageMock) GetStoragePool() *api.StoragePool {
-	return nil
-}
-
-func (s *storageMock) GetStoragePoolVolume() *api.StorageVolume {
-	return nil
-}
-
-func (s *storageMock) GetState() *state.State {
-	return nil
-}
-
-func (s *storageMock) StoragePoolVolumeSnapshotCreate(target *api.StorageVolumeSnapshotsPost) error {
-	return nil
-}
-
-func (s *storageMock) StoragePoolVolumeSnapshotDelete() error {
-	return nil
-}
-
-func (s *storageMock) StoragePoolVolumeSnapshotRename(newName string) error {
-	return nil
-}
diff --git a/lxd/storage_shared.go b/lxd/storage_shared.go
deleted file mode 100644
index cf7c974840..0000000000
--- a/lxd/storage_shared.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package main
-
-import (
-	"github.com/lxc/lxd/lxd/state"
-	storagePools "github.com/lxc/lxd/lxd/storage"
-	"github.com/lxc/lxd/shared/api"
-)
-
-type storageShared struct {
-	sType        storageType
-	sTypeName    string
-	sTypeVersion string
-
-	s *state.State
-
-	poolID int64
-	pool   *api.StoragePool
-
-	volume *api.StorageVolume
-}
-
-func (s *storageShared) GetStorageType() storageType {
-	return s.sType
-}
-
-func (s *storageShared) GetStorageTypeName() string {
-	return s.sTypeName
-}
-
-func (s *storageShared) GetStorageTypeVersion() string {
-	return s.sTypeVersion
-}
-
-func (s *storageShared) GetStoragePool() *api.StoragePool {
-	return s.pool
-}
-
-func (s *storageShared) GetStoragePoolVolume() *api.StorageVolume {
-	return s.volume
-}
-
-func (s *storageShared) GetState() *state.State {
-	return s.s
-}
-
-func (s *storageShared) GetStoragePoolWritable() api.StoragePoolPut {
-	return s.pool.Writable()
-}
-
-func (s *storageShared) GetStoragePoolVolumeWritable() api.StorageVolumePut {
-	return s.volume.Writable()
-}
-
-func (s *storageShared) SetStoragePoolWritable(writable *api.StoragePoolPut) {
-	s.pool.StoragePoolPut = *writable
-}
-
-func (s *storageShared) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
-	s.volume.StorageVolumePut = *writable
-}
-
-func (s *storageShared) createImageDbPoolVolume(fingerprint string) error {
-	// Fill in any default volume config.
-	volumeConfig := map[string]string{}
-	err := storagePools.VolumeFillDefault(fingerprint, volumeConfig, s.pool)
-	if err != nil {
-		return err
-	}
-
-	// Create a db entry for the storage volume of the image.
-	_, err = s.s.Cluster.StoragePoolVolumeCreate("default", fingerprint, "", storagePoolVolumeTypeImage, false, s.poolID, volumeConfig)
-	if err != nil {
-		// Try to delete the db entry on error.
-		s.deleteImageDbPoolVolume(fingerprint)
-		return err
-	}
-
-	return nil
-}
-
-func (s *storageShared) deleteImageDbPoolVolume(fingerprint string) error {
-	err := s.s.Cluster.StoragePoolVolumeDelete("default", fingerprint, storagePoolVolumeTypeImage, s.poolID)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}

From b148ab7d702fa747a31fca9aedd743438c78c736 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:50:57 +0000
Subject: [PATCH 02/13] lxd/container: Removes containerCreateEmptySnapshot

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 23 -----------------------
 1 file changed, 23 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 028efcaaa9..4b521fbef0 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -90,29 +90,6 @@ func instanceCreateFromBackup(s *state.State, info backup.Info, srcData io.ReadS
 	return postHook, revertHook, nil
 }
 
-func containerCreateEmptySnapshot(s *state.State, args db.InstanceArgs) (instance.Instance, error) {
-	// Create the snapshot
-	c, err := instanceCreateInternal(s, args)
-	if err != nil {
-		return nil, err
-	}
-
-	if c.Type() != instancetype.Container {
-		return nil, fmt.Errorf("Instance type must be container")
-	}
-
-	ct := c.(*containerLXC)
-
-	// Now create the empty snapshot
-	err = ct.Storage().ContainerSnapshotCreateEmpty(c)
-	if err != nil {
-		c.Delete()
-		return nil, err
-	}
-
-	return c, nil
-}
-
 // instanceCreateFromImage creates an instance from a rootfs image.
 func instanceCreateFromImage(d *Daemon, args db.InstanceArgs, hash string, op *operations.Operation) (instance.Instance, error) {
 	s := d.State()

From ec5a3d3a2569e2abbc0dfaf599772f682fbac366 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:51:14 +0000
Subject: [PATCH 03/13] lxd/container/lxc: Removes legacy storage functions

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_lxc.go | 37 +------------------------------------
 1 file changed, 1 insertion(+), 36 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index d538adf7cc..032dfca40d 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -458,9 +458,8 @@ type containerLXC struct {
 	idmapset *idmap.IdmapSet
 
 	// Storage
-	// Do not use these variables directly, instead use their associated get functions so they
+	// Do not use this variable directly, instead use its associated get function so it
 	// will be initialised on demand.
-	storage     storage
 	storagePool storagePools.Pool
 
 	// Clustering
@@ -1805,26 +1804,6 @@ func (c *containerLXC) DeviceEventHandler(runConf *deviceConfig.RunConfig) error
 	return nil
 }
 
-// Initialize storage interface for this container
-func (c *containerLXC) initStorage() error {
-	if c.storagePool != nil {
-		logger.Warn("Use of old storage layer when new storage layer is initialised")
-	}
-
-	if c.storage != nil {
-		return nil
-	}
-
-	s, err := storagePoolVolumeContainerLoadInit(c.state, c.Project(), c.Name())
-	if err != nil {
-		return err
-	}
-
-	c.storage = s
-
-	return nil
-}
-
 // Config handling
 func (c *containerLXC) expandConfig(profiles []api.Profile) error {
 	if profiles == nil && len(c.profiles) > 0 {
@@ -5831,20 +5810,6 @@ func (c *containerLXC) processesState() int64 {
 	return int64(len(pids))
 }
 
-// Storage gets instance's legacy storage pool. Deprecated.
-func (c *containerLXC) Storage() storage {
-	return c.legacyStorage()
-}
-
-// legacyStorage returns the instance's legacy storage pool. Deprecated.
-func (c *containerLXC) legacyStorage() storage {
-	if c.storage == nil {
-		c.initStorage()
-	}
-
-	return c.storage
-}
-
 // getStoragePool returns the current storage pool handle. To avoid a DB lookup each time this
 // function is called, the handle is cached internally in the containerLXC struct.
 func (c *containerLXC) getStoragePool() (storagePools.Pool, error) {

From ff92e6d38d59ccb4451921b542d1193dcac61540 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:54:09 +0000
Subject: [PATCH 04/13] lxd/main/init: Refactors availableStorageDrivers to not
 use old storage layer

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/main_init.go | 34 +++++++++++++---------------------
 1 file changed, 13 insertions(+), 21 deletions(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index d757277926..4c92d51d7f 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -159,50 +159,42 @@ func (c *cmdInit) Run(cmd *cobra.Command, args []string) error {
 }
 
 func (c *cmdInit) availableStorageDrivers(poolType string) []string {
-	drivers := []string{}
-
 	backingFs, err := util.FilesystemDetect(shared.VarPath())
 	if err != nil {
 		backingFs = "dir"
 	}
 
-	// Get info for new drivers.
+	// Get info for supported drivers.
 	s := state.NewState(nil, nil, nil, sys.DefaultOS(), nil, nil, nil, nil, nil)
-	info := storageDrivers.SupportedDrivers(s)
-	availableDrivers := []string{}
-	for _, entry := range info {
-		availableDrivers = append(availableDrivers, entry.Name)
-	}
+	supportedDrivers := storageDrivers.SupportedDrivers(s)
+
+	drivers := make([]string, 0, len(supportedDrivers))
 
-	// Check available backends
-	for _, driver := range supportedStoragePoolDrivers {
-		if poolType == "remote" && !shared.StringInSlice(driver, []string{"ceph", "cephfs"}) {
+	// Check available backends.
+	for _, driver := range supportedDrivers {
+		if poolType == "remote" && !shared.StringInSlice(driver.Name, []string{"ceph", "cephfs"}) {
 			continue
 		}
 
-		if poolType == "local" && shared.StringInSlice(driver, []string{"ceph", "cephfs"}) {
+		if poolType == "local" && shared.StringInSlice(driver.Name, []string{"ceph", "cephfs"}) {
 			continue
 		}
 
-		if poolType == "all" && driver == "cephfs" {
+		if poolType == "all" && driver.Name == "cephfs" {
 			continue
 		}
 
-		if driver == "dir" {
-			drivers = append(drivers, driver)
+		if driver.Name == "dir" {
+			drivers = append(drivers, driver.Name)
 			continue
 		}
 
 		// btrfs can work in user namespaces too. (If source=/some/path/on/btrfs is used.)
-		if shared.RunningInUserNS() && (backingFs != "btrfs" || driver != "btrfs") {
+		if shared.RunningInUserNS() && (backingFs != "btrfs" || driver.Name != "btrfs") {
 			continue
 		}
 
-		// Check if available as a driver.
-		if shared.StringInSlice(driver, availableDrivers) {
-			drivers = append(drivers, driver)
-			continue
-		}
+		drivers = append(drivers, driver.Name)
 	}
 
 	return drivers

From 4ea6eb87dd76035f9b3f382de38ebe8ada513f0d Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:54:37 +0000
Subject: [PATCH 05/13] lxd/main/init/auto: Removes dep on
 supportedStoragePoolDrivers

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/main_init_auto.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lxd/main_init_auto.go b/lxd/main_init_auto.go
index 5f38ed7116..19840d0a59 100644
--- a/lxd/main_init_auto.go
+++ b/lxd/main_init_auto.go
@@ -7,13 +7,14 @@ import (
 	"github.com/spf13/cobra"
 
 	"github.com/lxc/lxd/client"
+	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 )
 
 func (c *cmdInit) RunAuto(cmd *cobra.Command, args []string, d lxd.InstanceServer) (*cmdInitData, error) {
 	// Sanity checks
-	if c.flagStorageBackend != "" && !shared.StringInSlice(c.flagStorageBackend, supportedStoragePoolDrivers) {
+	if c.flagStorageBackend != "" && !shared.StringInSlice(c.flagStorageBackend, storageDrivers.AllDriverNames()) {
 		return nil, fmt.Errorf("The requested backend '%s' isn't supported by lxd init", c.flagStorageBackend)
 	}
 

From 639c44cf30a5771121398ae456dc492e5881cea4 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:56:30 +0000
Subject: [PATCH 06/13] lxd/migrate: Removes old storage type reference

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/migrate.go | 2 --
 1 file changed, 2 deletions(-)

diff --git a/lxd/migrate.go b/lxd/migrate.go
index bf0a0ea11e..c66efb2b1c 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -43,7 +43,6 @@ type migrationFields struct {
 	instance     instance.Instance
 
 	// storage specific fields
-	storage    storage
 	volumeOnly bool
 }
 
@@ -273,7 +272,6 @@ type MigrationSinkArgs struct {
 	Snapshots    []*migration.Snapshot
 
 	// Storage specific fields
-	Storage    storage
 	VolumeOnly bool
 
 	// Transport specific fields

From 406fa35a2cd7872d77f66a02de9af4e1fea4455b Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:56:45 +0000
Subject: [PATCH 07/13] lxd/migrate/storage/volumes: Removes reference to old
 storage type

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/migrate_storage_volumes.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index 66a959dd85..7d362a3a29 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -146,8 +146,8 @@ func (s *migrationSourceWs) DoStorage(state *state.State, poolName string, volNa
 
 func NewStorageMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) {
 	sink := migrationSink{
-		src:    migrationFields{storage: args.Storage, volumeOnly: args.VolumeOnly},
-		dest:   migrationFields{storage: args.Storage, volumeOnly: args.VolumeOnly},
+		src:    migrationFields{volumeOnly: args.VolumeOnly},
+		dest:   migrationFields{volumeOnly: args.VolumeOnly},
 		url:    args.Url,
 		dialer: args.Dialer,
 		push:   args.Push,
@@ -315,7 +315,6 @@ func (c *migrationSink) DoStorage(state *state.State, poolName string, req *api.
 			// as part of MigrationSinkArgs below.
 			rsyncFeatures := respHeader.GetRsyncFeaturesSlice()
 			args := MigrationSinkArgs{
-				Storage:       c.dest.storage,
 				RsyncFeatures: rsyncFeatures,
 				Snapshots:     respHeader.Snapshots,
 				VolumeOnly:    c.src.volumeOnly,

From e9364d11b9e307daa164628db9f285393c3a9e5c Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:57:48 +0000
Subject: [PATCH 08/13] lxd/storage: Removes legacy storage interface and
 unused functions

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage.go | 216 -------------------------------------------------
 1 file changed, 216 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 03397c3931..40a819313c 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -3,28 +3,21 @@ package main
 import (
 	"encoding/json"
 	"fmt"
-	"io"
 	"os"
 	"sync"
 	"sync/atomic"
 
-	"github.com/gorilla/websocket"
 	"github.com/pkg/errors"
 
-	"github.com/lxc/lxd/lxd/backup"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/device"
 	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/instance/instancetype"
-	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
 	"github.com/lxc/lxd/lxd/state"
 	storagePools "github.com/lxc/lxd/lxd/storage"
 	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/idmap"
-	"github.com/lxc/lxd/shared/ioprogress"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/version"
 )
@@ -85,200 +78,6 @@ func readStoragePoolDriversCache() map[string]string {
 	return drivers.(map[string]string)
 }
 
-// storageType defines the type of a storage
-type storageType int
-
-const (
-	storageTypeCeph storageType = iota
-	storageTypeMock
-)
-
-var supportedStoragePoolDrivers = []string{"btrfs", "ceph", "cephfs", "dir", "lvm", "zfs"}
-
-func storageTypeToString(sType storageType) (string, error) {
-	switch sType {
-	case storageTypeCeph:
-		return "ceph", nil
-	case storageTypeMock:
-		return "mock", nil
-	}
-
-	return "", fmt.Errorf("Invalid storage type")
-}
-
-func storageStringToType(sName string) (storageType, error) {
-	switch sName {
-	case "ceph":
-		return storageTypeCeph, nil
-	case "mock":
-		return storageTypeMock, nil
-	}
-
-	return -1, fmt.Errorf("Invalid storage type name")
-}
-
-// The storage interface defines the functions needed to implement a storage
-// backend for a given storage driver.
-type storage interface {
-	// Functions dealing with basic driver properties only.
-	StorageCoreInit() error
-	GetStorageType() storageType
-	GetStorageTypeName() string
-	GetStorageTypeVersion() string
-	GetState() *state.State
-
-	// Functions dealing with storage pools.
-	StoragePoolInit() error
-	StoragePoolCheck() error
-	StoragePoolCreate() error
-	StoragePoolDelete() error
-	StoragePoolMount() (bool, error)
-	StoragePoolUmount() (bool, error)
-	StoragePoolResources() (*api.ResourcesStoragePool, error)
-	StoragePoolUpdate(writable *api.StoragePoolPut, changedConfig []string) error
-	GetStoragePoolWritable() api.StoragePoolPut
-	SetStoragePoolWritable(writable *api.StoragePoolPut)
-	GetStoragePool() *api.StoragePool
-
-	// Functions dealing with custom storage volumes.
-	StoragePoolVolumeCreate() error
-	StoragePoolVolumeDelete() error
-	StoragePoolVolumeMount() (bool, error)
-	StoragePoolVolumeUmount() (bool, error)
-	StoragePoolVolumeUpdate(writable *api.StorageVolumePut, changedConfig []string) error
-	StoragePoolVolumeRename(newName string) error
-	StoragePoolVolumeCopy(source *api.StorageVolumeSource) error
-	GetStoragePoolVolumeWritable() api.StorageVolumePut
-	SetStoragePoolVolumeWritable(writable *api.StorageVolumePut)
-	GetStoragePoolVolume() *api.StorageVolume
-
-	// Functions dealing with custom storage volume snapshots.
-	StoragePoolVolumeSnapshotCreate(target *api.StorageVolumeSnapshotsPost) error
-	StoragePoolVolumeSnapshotDelete() error
-	StoragePoolVolumeSnapshotRename(newName string) error
-
-	// Functions dealing with container storage volumes.
-	// ContainerCreate creates an empty container (no rootfs/metadata.yaml)
-	ContainerCreate(container instance.Instance) error
-
-	// ContainerCreateFromImage creates a container from a image.
-	ContainerCreateFromImage(c instance.Instance, fingerprint string, tracker *ioprogress.ProgressTracker) error
-	ContainerDelete(c instance.Instance) error
-	ContainerCopy(target instance.Instance, source instance.Instance, containerOnly bool) error
-	ContainerRefresh(target instance.Instance, source instance.Instance, snapshots []instance.Instance) error
-	ContainerMount(c instance.Instance) (bool, error)
-	ContainerUmount(c instance.Instance, path string) (bool, error)
-	ContainerRename(container instance.Instance, newName string) error
-	ContainerRestore(container instance.Instance, sourceContainer instance.Instance) error
-	ContainerGetUsage(container instance.Instance) (int64, error)
-	GetContainerPoolInfo() (int64, string, string)
-	ContainerStorageReady(container instance.Instance) bool
-
-	ContainerSnapshotCreate(target instance.Instance, source instance.Instance) error
-	ContainerSnapshotDelete(c instance.Instance) error
-	ContainerSnapshotRename(c instance.Instance, newName string) error
-	ContainerSnapshotStart(c instance.Instance) (bool, error)
-	ContainerSnapshotStop(c instance.Instance) (bool, error)
-
-	ContainerBackupCreate(path string, backup backup.Backup, sourceContainer instance.Instance) error
-	ContainerBackupLoad(info backup.Info, data io.ReadSeeker, tarArgs []string) error
-
-	// For use in migrating snapshots.
-	ContainerSnapshotCreateEmpty(c instance.Instance) error
-
-	// Functions dealing with image storage volumes.
-	ImageCreate(fingerprint string, tracker *ioprogress.ProgressTracker) error
-	ImageDelete(fingerprint string) error
-
-	// Storage type agnostic functions.
-	StorageEntitySetQuota(volumeType int, size int64, data interface{}) error
-
-	// Functions dealing with migration.
-	MigrationType() migration.MigrationFSType
-	// Does this storage backend preserve inodes when it is moved across LXD
-	// hosts?
-	PreservesInodes() bool
-
-	// Get the pieces required to migrate the source. This contains a list
-	// of the "object" (i.e. container or snapshot, depending on whether or
-	// not it is a snapshot name) to be migrated in order, and a channel
-	// for arguments of the specific migration command. We use a channel
-	// here so we don't have to invoke `zfs send` or `rsync` or whatever
-	// and keep its stdin/stdout open for each snapshot during the course
-	// of migration, we can do it lazily.
-	//
-	// N.B. that the order here important: e.g. in btrfs/zfs, snapshots
-	// which are parents of other snapshots should be sent first, to save
-	// as much transfer as possible. However, the base container is always
-	// sent as the first object, since that is the grandparent of every
-	// snapshot.
-	//
-	// We leave sending containers which are snapshots of other containers
-	// already present on the target instance as an exercise for the
-	// enterprising developer.
-	MigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error)
-	MigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error
-
-	StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error)
-	StorageMigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error
-}
-
-func storageInit(s *state.State, project, poolName, volumeName string, volumeType int) (storage, error) {
-	// Load the storage pool.
-	poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
-	if err != nil {
-		return nil, errors.Wrapf(err, "Load storage pool %q", poolName)
-	}
-
-	driver := pool.Driver
-	if driver == "" {
-		// This shouldn't actually be possible but better safe than
-		// sorry.
-		return nil, fmt.Errorf("no storage driver was provided")
-	}
-
-	// Load the storage volume.
-	volume := &api.StorageVolume{}
-	if volumeName != "" {
-		_, volume, err = s.Cluster.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, poolID)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	sType, err := storageStringToType(driver)
-	if err != nil {
-		return nil, err
-	}
-
-	switch sType {
-	case storageTypeCeph:
-		ceph := storageCeph{}
-		ceph.poolID = poolID
-		ceph.pool = pool
-		ceph.volume = volume
-		ceph.s = s
-		err = ceph.StoragePoolInit()
-		if err != nil {
-			return nil, err
-		}
-		return &ceph, nil
-	case storageTypeMock:
-		mock := storageMock{}
-		mock.poolID = poolID
-		mock.pool = pool
-		mock.volume = volume
-		mock.s = s
-		err = mock.StoragePoolInit()
-		if err != nil {
-			return nil, err
-		}
-		return &mock, nil
-	}
-
-	return nil, fmt.Errorf("invalid storage type")
-}
-
 func storagePoolVolumeAttachPrepare(s *state.State, poolName string, volumeName string, volumeType int, c *containerLXC) error {
 	// Load the DB records
 	poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
@@ -439,21 +238,6 @@ func storagePoolVolumeAttachPrepare(s *state.State, poolName string, volumeName
 	return nil
 }
 
-func storagePoolVolumeInit(s *state.State, project, poolName, volumeName string, volumeType int) (storage, error) {
-	// No need to detect storage here, its a new container.
-	return storageInit(s, project, poolName, volumeName, volumeType)
-}
-
-func storagePoolVolumeContainerLoadInit(s *state.State, project, containerName string) (storage, error) {
-	// Get the storage pool of a given container.
-	poolName, err := s.Cluster.InstancePool(project, containerName)
-	if err != nil {
-		return nil, errors.Wrapf(err, "Load storage pool for container %q in project %q", containerName, project)
-	}
-
-	return storagePoolVolumeInit(s, project, poolName, containerName, storagePoolVolumeTypeContainer)
-}
-
 func deleteContainerMountpoint(mountPoint string, mountPointSymlink string, storageTypeName string) error {
 	if shared.PathExists(mountPointSymlink) {
 		err := os.Remove(mountPointSymlink)

From 6103d0c508a53c3089505c82bbc1d421b490d278 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:58:06 +0000
Subject: [PATCH 09/13] lxd/storage/drivers/load: Adds AllDriverNames

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/load.go | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/lxd/storage/drivers/load.go b/lxd/storage/drivers/load.go
index 6c5060a704..f661dd98e4 100644
--- a/lxd/storage/drivers/load.go
+++ b/lxd/storage/drivers/load.go
@@ -48,7 +48,7 @@ func Load(state *state.State, driverName string, name string, config map[string]
 
 // SupportedDrivers returns a list of supported storage drivers.
 func SupportedDrivers(s *state.State) []Info {
-	supportedDrivers := []Info{}
+	supportedDrivers := make([]Info, 0, len(drivers))
 
 	for driverName := range drivers {
 		driver, err := Load(s, driverName, "", nil, nil, nil, nil)
@@ -61,3 +61,13 @@ func SupportedDrivers(s *state.State) []Info {
 
 	return supportedDrivers
 }
+
+// AllDriverNames returns a list of all storage driver names.
+func AllDriverNames() []string {
+	supportDriverNames := make([]string, 0, len(drivers))
+	for driverName := range drivers {
+		supportDriverNames = append(supportDriverNames, driverName)
+	}
+
+	return supportDriverNames
+}

From bfd16b0a82d7ecdc00dea2a1d212243efaae005a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:58:25 +0000
Subject: [PATCH 10/13] lxd/storage/migration: Removes unused functions

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_migration.go | 198 ---------------------------------------
 1 file changed, 198 deletions(-)

diff --git a/lxd/storage_migration.go b/lxd/storage_migration.go
index c44e1da8c3..46ad741639 100644
--- a/lxd/storage_migration.go
+++ b/lxd/storage_migration.go
@@ -1,152 +1,15 @@
 package main
 
 import (
-	"fmt"
 	"time"
 
-	"github.com/gorilla/websocket"
-
 	"github.com/lxc/lxd/lxd/db"
 	deviceConfig "github.com/lxc/lxd/lxd/device/config"
-	"github.com/lxc/lxd/lxd/instance"
 	"github.com/lxc/lxd/lxd/instance/instancetype"
 	"github.com/lxc/lxd/lxd/migration"
-	"github.com/lxc/lxd/lxd/operations"
-	"github.com/lxc/lxd/lxd/project"
-	"github.com/lxc/lxd/lxd/rsync"
-	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/api"
-	"github.com/lxc/lxd/shared/logger"
 )
 
-// MigrationStorageSourceDriver defines the functions needed to implement a
-// migration source driver.
-type MigrationStorageSourceDriver interface {
-	/* send any bits of the container/snapshots that are possible while the
-	 * container is still running.
-	 */
-	SendWhileRunning(conn *websocket.Conn, op *operations.Operation, bwlimit string, containerOnly bool) error
-
-	/* send the final bits (e.g. a final delta snapshot for zfs, btrfs, or
-	 * do a final rsync) of the fs after the container has been
-	 * checkpointed. This will only be called when a container is actually
-	 * being live migrated.
-	 */
-	SendAfterCheckpoint(conn *websocket.Conn, bwlimit string) error
-
-	/* Called after either success or failure of a migration, can be used
-	 * to clean up any temporary snapshots, etc.
-	 */
-	Cleanup()
-
-	SendStorageVolume(conn *websocket.Conn, op *operations.Operation, bwlimit string, storage storage, volumeOnly bool) error
-}
-
-type rsyncStorageSourceDriver struct {
-	container     instance.Instance
-	snapshots     []instance.Instance
-	rsyncFeatures []string
-}
-
-func (s rsyncStorageSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operations.Operation, bwlimit string, storage storage, volumeOnly bool) error {
-	ourMount, err := storage.StoragePoolVolumeMount()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer storage.StoragePoolVolumeUmount()
-	}
-
-	state := storage.GetState()
-	pool := storage.GetStoragePool()
-	volume := storage.GetStoragePoolVolume()
-
-	if !volumeOnly {
-		snapshots, err := driver.VolumeSnapshotsGet(state, pool.Name, volume.Name, storagePoolVolumeTypeCustom)
-		if err != nil {
-			return err
-		}
-
-		for _, snap := range snapshots {
-			wrapper := migration.ProgressTracker(op, "fs_progress", snap.Name)
-			path := driver.GetStoragePoolVolumeSnapshotMountPoint(pool.Name, snap.Name)
-			path = shared.AddSlash(path)
-			logger.Debugf("Starting to send storage volume snapshot %s on storage pool %s from %s", snap.Name, pool.Name, path)
-
-			err = rsync.Send(volume.Name, path, &shared.WebsocketIO{Conn: conn}, wrapper, s.rsyncFeatures, bwlimit, state.OS.ExecPath)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	wrapper := migration.ProgressTracker(op, "fs_progress", volume.Name)
-	path := driver.GetStoragePoolVolumeMountPoint(pool.Name, volume.Name)
-	path = shared.AddSlash(path)
-	logger.Debugf("Starting to send storage volume %s on storage pool %s from %s", volume.Name, pool.Name, path)
-	err = rsync.Send(volume.Name, path, &shared.WebsocketIO{Conn: conn}, wrapper, s.rsyncFeatures, bwlimit, state.OS.ExecPath)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s rsyncStorageSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operations.Operation, bwlimit string, containerOnly bool) error {
-	ctName, _, _ := shared.InstanceGetParentAndSnapshotName(s.container.Name())
-
-	if !containerOnly {
-		for _, send := range s.snapshots {
-			ourStart, err := send.StorageStart()
-			if err != nil {
-				return err
-			}
-			if ourStart {
-				defer send.StorageStop()
-			}
-
-			path := send.Path()
-			wrapper := migration.ProgressTracker(op, "fs_progress", send.Name())
-			state := s.container.DaemonState()
-			err = rsync.Send(project.Prefix(s.container.Project(), ctName), shared.AddSlash(path), &shared.WebsocketIO{Conn: conn}, wrapper, s.rsyncFeatures, bwlimit, state.OS.ExecPath)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	wrapper := migration.ProgressTracker(op, "fs_progress", s.container.Name())
-	state := s.container.DaemonState()
-
-	// Attempt to freeze the container to avoid changing files during transfer
-	if s.container.IsRunning() {
-		err := s.container.Freeze()
-		if err != nil {
-			logger.Errorf("Unable to freeze container during live-migration")
-		} else {
-			defer s.container.Unfreeze()
-		}
-	}
-
-	return rsync.Send(project.Prefix(s.container.Project(), ctName), shared.AddSlash(s.container.Path()), &shared.WebsocketIO{Conn: conn}, wrapper, s.rsyncFeatures, bwlimit, state.OS.ExecPath)
-}
-
-func (s rsyncStorageSourceDriver) SendAfterCheckpoint(conn *websocket.Conn, bwlimit string) error {
-	ctName, _, _ := shared.InstanceGetParentAndSnapshotName(s.container.Name())
-	// resync anything that changed between our first send and the checkpoint
-	state := s.container.DaemonState()
-	return rsync.Send(project.Prefix(s.container.Project(), ctName), shared.AddSlash(s.container.Path()), &shared.WebsocketIO{Conn: conn}, nil, s.rsyncFeatures, bwlimit, state.OS.ExecPath)
-}
-
-func (s rsyncStorageSourceDriver) Cleanup() {
-	// noop
-}
-
-func rsyncStorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
-	return rsyncStorageSourceDriver{nil, nil, args.RsyncFeatures}, nil
-}
-
 func snapshotProtobufToInstanceArgs(project string, containerName string, snap *migration.Snapshot) db.InstanceArgs {
 	config := map[string]string{}
 
@@ -188,64 +51,3 @@ func snapshotProtobufToInstanceArgs(project string, containerName string, snap *
 
 	return args
 }
-
-func rsyncStorageMigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
-	err := args.Storage.StoragePoolVolumeCreate()
-	if err != nil {
-		return err
-	}
-
-	ourMount, err := args.Storage.StoragePoolVolumeMount()
-	if err != nil {
-		return err
-	}
-	if ourMount {
-		defer args.Storage.StoragePoolVolumeUmount()
-	}
-
-	pool := args.Storage.GetStoragePool()
-	volume := args.Storage.GetStoragePoolVolume()
-
-	if !args.VolumeOnly {
-		for _, snap := range args.Snapshots {
-			target := api.StorageVolumeSnapshotsPost{
-				Name: fmt.Sprintf("%s/%s", volume.Name, *snap.Name),
-			}
-
-			dbArgs := &db.StorageVolumeArgs{
-				Name:        fmt.Sprintf("%s/%s", volume.Name, *snap.Name),
-				PoolName:    pool.Name,
-				TypeName:    volume.Type,
-				Snapshot:    true,
-				Config:      volume.Config,
-				Description: volume.Description,
-			}
-
-			_, err = storagePoolVolumeSnapshotDBCreateInternal(args.Storage.GetState(), dbArgs)
-			if err != nil {
-				return err
-			}
-
-			wrapper := migration.ProgressTracker(op, "fs_progress", target.Name)
-			path := driver.GetStoragePoolVolumeMountPoint(pool.Name, volume.Name)
-			path = shared.AddSlash(path)
-			logger.Debugf("Starting to receive storage volume snapshot %s on storage pool %s into %s", target.Name, pool.Name, path)
-
-			err = rsync.Recv(path, &shared.WebsocketIO{Conn: conn}, wrapper, args.RsyncFeatures)
-			if err != nil {
-				return err
-			}
-
-			err = args.Storage.StoragePoolVolumeSnapshotCreate(&target)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	wrapper := migration.ProgressTracker(op, "fs_progress", volume.Name)
-	path := driver.GetStoragePoolVolumeMountPoint(pool.Name, volume.Name)
-	path = shared.AddSlash(path)
-	logger.Debugf("Starting to receive storage volume %s on storage pool %s into %s", volume.Name, pool.Name, path)
-	return rsync.Recv(path, &shared.WebsocketIO{Conn: conn}, wrapper, args.RsyncFeatures)
-}

From fcefac9a6cba5afd920196dbd428fbb49fee0e56 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:58:44 +0000
Subject: [PATCH 11/13] lxd/storage/pools/config: Removes ref to
 supportedStoragePoolDrivers

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_pools_config.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lxd/storage_pools_config.go b/lxd/storage_pools_config.go
index e5b04e742d..ce1ca3d54e 100644
--- a/lxd/storage_pools_config.go
+++ b/lxd/storage_pools_config.go
@@ -7,6 +7,7 @@ import (
 
 	"golang.org/x/sys/unix"
 
+	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/units"
 )
@@ -118,7 +119,7 @@ var storagePoolConfigKeys = map[string]func(value string) error{
 
 func storagePoolValidateConfig(name string, driver string, config map[string]string, oldConfig map[string]string) error {
 	err := func(value string) error {
-		return shared.IsOneOf(value, supportedStoragePoolDrivers)
+		return shared.IsOneOf(value, storageDrivers.AllDriverNames())
 	}(driver)
 	if err != nil {
 		return err

From dfc0eb6ba5b1e1eb89dcc3aa56f0bb7268b36d7c Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:59:01 +0000
Subject: [PATCH 12/13] lxd/storage/utils: Remove unused functions

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_utils.go | 44 --------------------------------------------
 1 file changed, 44 deletions(-)

diff --git a/lxd/storage_utils.go b/lxd/storage_utils.go
index 5a37a6b9e1..b0464d6cae 100644
--- a/lxd/storage_utils.go
+++ b/lxd/storage_utils.go
@@ -6,53 +6,9 @@ import (
 
 	"golang.org/x/sys/unix"
 
-	"github.com/lxc/lxd/lxd/instance"
-	driver "github.com/lxc/lxd/lxd/storage"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/logger"
 )
 
-func shrinkVolumeFilesystem(s storage, volumeType int, fsType string, devPath string, mntpoint string, byteSize int64, data interface{}) (func() (bool, error), error) {
-	var cleanupFunc func() (bool, error)
-	switch fsType {
-	case "xfs":
-		logger.Errorf("XFS filesystems cannot be shrunk: dump, mkfs, and restore are required")
-		return nil, fmt.Errorf("xfs filesystems cannot be shrunk: dump, mkfs, and restore are required")
-	case "btrfs":
-		fallthrough
-	case "": // if not specified, default to ext4
-		fallthrough
-	case "ext4":
-		switch volumeType {
-		case storagePoolVolumeTypeContainer:
-			c := data.(instance.Instance)
-			ourMount, err := c.StorageStop()
-			if err != nil {
-				return nil, err
-			}
-			if !ourMount {
-				cleanupFunc = c.StorageStart
-			}
-		case storagePoolVolumeTypeCustom:
-			ourMount, err := s.StoragePoolVolumeUmount()
-			if err != nil {
-				return nil, err
-			}
-			if !ourMount {
-				cleanupFunc = s.StoragePoolVolumeMount
-			}
-		default:
-			return nil, fmt.Errorf(`Resizing not implemented for storage volume type %d`, volumeType)
-		}
-
-	default:
-		return nil, fmt.Errorf(`Shrinking not supported for filesystem type "%s"`, fsType)
-	}
-
-	err := driver.ShrinkFileSystem(fsType, devPath, mntpoint, byteSize)
-	return cleanupFunc, err
-}
-
 // MkfsOptions represents options for filesystem creation.
 type mkfsOptions struct {
 	Label string

From 53aa340a4e2fe8b201561d602191dc0ca2f2b990 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Thu, 27 Feb 2020 14:59:19 +0000
Subject: [PATCH 13/13] lxd/storage/volumes/utils: Removes unused function

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_volumes_utils.go | 29 -----------------------------
 1 file changed, 29 deletions(-)

diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index ec7103d87a..8569e23426 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -375,32 +375,3 @@ func profilesUsingPoolVolumeGetNames(db *db.Cluster, volumeName string, volumeTy
 
 	return usedBy, nil
 }
-
-func storagePoolVolumeSnapshotDBCreateInternal(state *state.State, dbArgs *db.StorageVolumeArgs) (storage, error) {
-	// Create database entry for new storage volume.
-	err := storagePools.VolumeDBCreate(state, "default", dbArgs.PoolName, dbArgs.Name, dbArgs.Description, dbArgs.TypeName, true, dbArgs.Config)
-	if err != nil {
-		return nil, err
-	}
-
-	// Convert the volume type name to our internal integer representation.
-	poolID, err := state.Cluster.StoragePoolGetID(dbArgs.PoolName)
-	if err != nil {
-		return nil, err
-	}
-
-	volumeType, err := storagePools.VolumeTypeNameToType(dbArgs.TypeName)
-	if err != nil {
-		state.Cluster.StoragePoolVolumeDelete("default", dbArgs.Name, volumeType, poolID)
-		return nil, err
-	}
-
-	// Initialize new storage volume on the target storage pool.
-	s, err := storagePoolVolumeInit(state, "default", dbArgs.PoolName, dbArgs.Name, volumeType)
-	if err != nil {
-		state.Cluster.StoragePoolVolumeDelete("default", dbArgs.Name, volumeType, poolID)
-		return nil, err
-	}
-
-	return s, nil
-}


More information about the lxc-devel mailing list