[lxc-devel] [lxd/master] Port the zfs driver

stgraber on Github lxc-bot at linuxcontainers.org
Fri Jan 10 04:56:33 UTC 2020


From f614c8d69a174f08be7ba0fe4a0957b603d4b7f1 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 9 Jan 2020 17:25:26 -0500
Subject: [PATCH 1/7] lxd/storage/drivers: Use standard errors
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/storage/drivers/driver_btrfs_volumes.go  | 12 ++++++------
 lxd/storage/drivers/driver_cephfs_volumes.go |  8 ++++----
 lxd/storage/drivers/driver_dir_volumes.go    | 10 +++++-----
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/lxd/storage/drivers/driver_btrfs_volumes.go b/lxd/storage/drivers/driver_btrfs_volumes.go
index c8826b447f..80e26bf1ee 100644
--- a/lxd/storage/drivers/driver_btrfs_volumes.go
+++ b/lxd/storage/drivers/driver_btrfs_volumes.go
@@ -240,14 +240,14 @@ func (d *btrfs) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bo
 // CreateVolumeFromMigration creates a volume being sent via a migration.
 func (d *btrfs) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
 	if vol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
+		return ErrNotSupported
 	}
 
 	// Handle simple rsync through generic.
 	if volTargetArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC {
 		return genericCreateVolumeFromMigration(d, nil, vol, conn, volTargetArgs, preFiller, op)
 	} else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_BTRFS {
-		return fmt.Errorf("Migration type not supported")
+		return ErrNotSupported
 	}
 
 	// Handle btrfs send/receive migration.
@@ -349,11 +349,11 @@ func (d *btrfs) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
 // UpdateVolume applies config changes to the volume.
 func (d *btrfs) UpdateVolume(vol Volume, changedConfig map[string]string) error {
 	if vol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
+		return ErrNotSupported
 	}
 
 	if vol.volType != VolumeTypeCustom {
-		return fmt.Errorf("Volume type not supported")
+		return ErrNotSupported
 	}
 
 	return d.SetVolumeQuota(vol, vol.config["size"], nil)
@@ -476,14 +476,14 @@ func (d *btrfs) RenameVolume(vol Volume, newVolName string, op *operations.Opera
 // MigrateVolume sends a volume for migration.
 func (d *btrfs) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error {
 	if vol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
+		return ErrNotSupported
 	}
 
 	// Handle simple rsync through generic.
 	if volSrcArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC {
 		return d.vfsMigrateVolume(vol, conn, volSrcArgs, op)
 	} else if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_BTRFS {
-		return fmt.Errorf("Migration type not supported")
+		return ErrNotSupported
 	}
 
 	// Handle btrfs send/receive migration.
diff --git a/lxd/storage/drivers/driver_cephfs_volumes.go b/lxd/storage/drivers/driver_cephfs_volumes.go
index a1cb7ebde9..857ea0ce96 100644
--- a/lxd/storage/drivers/driver_cephfs_volumes.go
+++ b/lxd/storage/drivers/driver_cephfs_volumes.go
@@ -18,11 +18,11 @@ import (
 // CreateVolume creates a new storage volume on disk.
 func (d *cephfs) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error {
 	if vol.volType != VolumeTypeCustom {
-		return fmt.Errorf("Volume type not supported")
+		return ErrNotSupported
 	}
 
 	if vol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
+		return ErrNotSupported
 	}
 
 	// Create the main volume path.
@@ -141,7 +141,7 @@ func (d *cephfs) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots b
 // CreateVolumeFromMigration creates a new volume (with or without snapshots) from a migration data stream.
 func (d *cephfs) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
 	if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
-		return fmt.Errorf("Migration type not supported")
+		return ErrNotSupported
 	}
 
 	// Create the main volume path.
@@ -427,7 +427,7 @@ func (d *cephfs) RenameVolume(vol Volume, newName string, op *operations.Operati
 // MigrateVolume streams the volume (with or without snapshots)
 func (d *cephfs) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error {
 	if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
-		return fmt.Errorf("Migration type not supported")
+		return ErrNotSupported
 	}
 
 	return d.vfsMigrateVolume(vol, conn, volSrcArgs, op)
diff --git a/lxd/storage/drivers/driver_dir_volumes.go b/lxd/storage/drivers/driver_dir_volumes.go
index 67df0ffe44..3a1c8d92b0 100644
--- a/lxd/storage/drivers/driver_dir_volumes.go
+++ b/lxd/storage/drivers/driver_dir_volumes.go
@@ -118,11 +118,11 @@ func (d *dir) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bool
 // CreateVolumeFromMigration creates a volume being sent via a migration.
 func (d *dir) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
 	if vol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
+		return ErrNotSupported
 	}
 
 	if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
-		return fmt.Errorf("Migration type not supported")
+		return ErrNotSupported
 	}
 
 	return genericCreateVolumeFromMigration(d, d.setupInitialQuota, vol, conn, volTargetArgs, preFiller, op)
@@ -193,7 +193,7 @@ func (d *dir) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
 // UpdateVolume applies config changes to the volume.
 func (d *dir) UpdateVolume(vol Volume, changedConfig map[string]string) error {
 	if vol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
+		return ErrNotSupported
 	}
 
 	if _, changed := changedConfig["size"]; changed {
@@ -268,11 +268,11 @@ func (d *dir) RenameVolume(vol Volume, newVolName string, op *operations.Operati
 // MigrateVolume sends a volume for migration.
 func (d *dir) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error {
 	if vol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
+		return ErrNotSupported
 	}
 
 	if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
-		return fmt.Errorf("Migration type not supported")
+		return ErrNotSupported
 	}
 
 	return d.vfsMigrateVolume(vol, conn, volSrcArgs, op)
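
With a shared sentinel error, callers can detect unsupported operations by
comparing against the value instead of matching error strings. A minimal
sketch, assuming ErrNotSupported is a package-level error exported by
lxd/storage/drivers (the fallback helper below is hypothetical):

	// Try the driver-specific path first, falling back when unsupported.
	err := d.UpdateVolume(vol, changedConfig)
	if err == ErrNotSupported {
		// Hypothetical generic fallback; not part of this series.
		return genericUpdateVolume(d, vol, changedConfig)
	}
	return err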

From c717f330669350d5aa1273b16d5796c7b74f1205 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 9 Jan 2020 17:45:25 -0500
Subject: [PATCH 2/7] lxd/storage/btrfs: Disable send/receive inside containers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/storage/drivers/driver_btrfs.go | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/lxd/storage/drivers/driver_btrfs.go b/lxd/storage/drivers/driver_btrfs.go
index 3f0de22616..e51abbd710 100644
--- a/lxd/storage/drivers/driver_btrfs.go
+++ b/lxd/storage/drivers/driver_btrfs.go
@@ -347,10 +347,8 @@ func (d *btrfs) MigrationTypes(contentType ContentType, refresh bool) []migratio
 		return nil
 	}
 
-	// When performing a refresh, always use rsync. Using btrfs send/receive
-	// here doesn't make sense since it would need to send everything again
-	// which defeats the purpose of a refresh.
-	if refresh {
+	// Only use rsync for refreshes or when running inside an unprivileged container.
+	if refresh || d.state.OS.RunningInUserNS {
 		return []migration.Type{
 			{
 				FSType:   migration.MigrationFSType_RSYNC,

From 1366065521a649028279dd6b7f1241c7cb50d16f Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 9 Jan 2020 16:59:07 -0500
Subject: [PATCH 3/7] lxd/init: Support new storage drivers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/main_init.go | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index a0a26cb157..00a4a574a5 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -7,6 +7,9 @@ import (
 	"github.com/spf13/cobra"
 
 	"github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/state"
+	storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
+	"github.com/lxc/lxd/lxd/sys"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 )
@@ -163,6 +166,14 @@ func (c *cmdInit) availableStorageDrivers(poolType string) []string {
 		backingFs = "dir"
 	}
 
+	// Get info for new drivers.
+	s := state.NewState(nil, nil, nil, sys.DefaultOS(), nil, nil, nil, nil, nil)
+	info := storageDrivers.SupportedDrivers(s)
+	availableDrivers := []string{}
+	for _, entry := range info {
+		availableDrivers = append(availableDrivers, entry.Name)
+	}
+
 	// Check available backends
 	for _, driver := range supportedStoragePoolDrivers {
 		if poolType == "remote" && !shared.StringInSlice(driver, []string{"ceph", "cephfs"}) {
@@ -182,19 +193,23 @@ func (c *cmdInit) availableStorageDrivers(poolType string) []string {
 			continue
 		}
 
-		// btrfs can work in user namespaces too. (If
-		// source=/some/path/on/btrfs is used.)
+		// btrfs can work in user namespaces too. (If source=/some/path/on/btrfs is used.)
 		if shared.RunningInUserNS() && (backingFs != "btrfs" || driver != "btrfs") {
 			continue
 		}
 
-		// Initialize a core storage interface for the given driver.
-		_, err := storageCoreInit(driver)
-		if err != nil {
+		// Check if available as a new style driver.
+		if shared.StringInSlice(driver, availableDrivers) {
+			drivers = append(drivers, driver)
 			continue
 		}
 
-		drivers = append(drivers, driver)
+		// Check if available as an old style driver.
+		_, err := storageCoreInit(driver)
+		if err == nil {
+			drivers = append(drivers, driver)
+			continue
+		}
 	}
 
 	return drivers

From e93c05491c7ba7d02daba3e738bd23817ae49d41 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 9 Jan 2020 23:04:24 -0500
Subject: [PATCH 4/7] lxd/migration: Improve multi-pass transfers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When dealing with multi-pass transfers of the main volume, it is useful
for the drivers to know ahead of time that this will happen and, if so, to
have a way to persist some state between the two stages (see the sketch
after this patch).

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/migrate_container.go           | 20 +++++++-------------
 lxd/migration/migration_volumes.go |  2 ++
 2 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 3dfd73f50f..53c4f17461 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -498,6 +498,7 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 		return err
 	}
 
+	volSourceArgs := migration.VolumeSourceArgs{}
 	if pool != nil {
 		rsyncBwlimit = pool.Driver().Config()["rsync.bwlimit"]
 		migrationType, err = migration.MatchTypes(respHeader, migration.MigrationFSType_RSYNC, poolMigrationTypes)
@@ -513,13 +514,11 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 			sendSnapshotNames = respHeader.GetSnapshotNames()
 		}
 
-		volSourceArgs := migration.VolumeSourceArgs{
-			Name:          s.instance.Name(),
-			MigrationType: migrationType,
-			Snapshots:     sendSnapshotNames,
-			TrackProgress: true,
-			FinalSync:     false,
-		}
+		volSourceArgs.Name = s.instance.Name()
+		volSourceArgs.MigrationType = migrationType
+		volSourceArgs.Snapshots = sendSnapshotNames
+		volSourceArgs.TrackProgress = true
+		volSourceArgs.MultiSync = s.live || (respHeader.Criu != nil && *respHeader.Criu == migration.CRIUType_NONE)
 
 		err = pool.MigrateInstance(s.instance, &shared.WebsocketIO{Conn: s.fsConn}, volSourceArgs, migrateOp)
 		if err != nil {
@@ -752,12 +751,7 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 	// to minimize downtime.
 	if s.live || (respHeader.Criu != nil && *respHeader.Criu == migration.CRIUType_NONE) {
 		if pool != nil {
-			volSourceArgs := migration.VolumeSourceArgs{
-				Name:          s.instance.Name(),
-				MigrationType: migrationType,
-				TrackProgress: true,
-				FinalSync:     true,
-			}
+			volSourceArgs.FinalSync = true
 
 			err = pool.MigrateInstance(s.instance, &shared.WebsocketIO{Conn: s.fsConn}, volSourceArgs, migrateOp)
 			if err != nil {
diff --git a/lxd/migration/migration_volumes.go b/lxd/migration/migration_volumes.go
index 883afc3f86..4ccb7e8944 100644
--- a/lxd/migration/migration_volumes.go
+++ b/lxd/migration/migration_volumes.go
@@ -23,7 +23,9 @@ type VolumeSourceArgs struct {
 	Snapshots     []string
 	MigrationType Type
 	TrackProgress bool
+	MultiSync     bool
 	FinalSync     bool
+	Data          interface{}
 }
 
 // VolumeTargetArgs represents the arguments needed to setup a volume migration sink.
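
As a rough sketch of what the two new fields enable (using the pointer
signature introduced in the next patch; the example driver type and the
sendFull/sendIncremental helpers are assumptions for illustration, not
part of this series):

	func (d *example) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
		if volSrcArgs.MultiSync && volSrcArgs.FinalSync {
			// Final pass: reuse the state persisted during the first pass.
			lastSnap := volSrcArgs.Data.(string)
			return d.sendIncremental(vol, lastSnap, conn)
		}

		// First (or only) pass: send everything.
		snapName, err := d.sendFull(vol, conn)
		if err != nil {
			return err
		}

		// Another pass is coming, so record where we stopped.
		if volSrcArgs.MultiSync {
			volSrcArgs.Data = snapName
		}

		return nil
	}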

From 9eb762b2a626b926bea5b9db81177115a94294e2 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Thu, 9 Jan 2020 23:16:01 -0500
Subject: [PATCH 5/7] lxd/storage: Pass VolumeSourceArgs as pointer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/migrate_container.go                     |  2 +-
 lxd/migrate_storage_volumes.go               |  2 +-
 lxd/storage/backend_lxd.go                   | 10 +++++-----
 lxd/storage/backend_mock.go                  |  4 ++--
 lxd/storage/drivers/driver_btrfs_volumes.go  |  2 +-
 lxd/storage/drivers/driver_cephfs_volumes.go |  2 +-
 lxd/storage/drivers/driver_common.go         |  2 +-
 lxd/storage/drivers/driver_dir_volumes.go    |  2 +-
 lxd/storage/drivers/interface.go             |  2 +-
 lxd/storage/pool_interface.go                |  4 ++--
 10 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 53c4f17461..74d6235652 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -498,7 +498,7 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 		return err
 	}
 
-	volSourceArgs := migration.VolumeSourceArgs{}
+	volSourceArgs := &migration.VolumeSourceArgs{}
 	if pool != nil {
 		rsyncBwlimit = pool.Driver().Config()["rsync.bwlimit"]
 		migrationType, err = migration.MatchTypes(respHeader, migration.MigrationFSType_RSYNC, poolMigrationTypes)
diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index f78ffa2119..553ba0bff8 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -153,7 +153,7 @@ func (s *migrationSourceWs) DoStorage(state *state.State, poolName string, volNa
 			return err
 		}
 
-		volSourceArgs := migration.VolumeSourceArgs{
+		volSourceArgs := &migration.VolumeSourceArgs{
 			Name:          volName,
 			MigrationType: migrationType,
 			Snapshots:     snapshotNames,
diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 41409f25eb..90b775c090 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -609,7 +609,7 @@ func (b *lxdBackend) CreateInstanceFromCopy(inst instance.Instance, src instance
 		aEndErrCh := make(chan error, 1)
 		bEndErrCh := make(chan error, 1)
 		go func() {
-			err := srcPool.MigrateInstance(src, aEnd, migration.VolumeSourceArgs{
+			err := srcPool.MigrateInstance(src, aEnd, &migration.VolumeSourceArgs{
 				Name:          src.Name(),
 				Snapshots:     snapshotNames,
 				MigrationType: migrationType,
@@ -750,7 +750,7 @@ func (b *lxdBackend) RefreshInstance(inst instance.Instance, src instance.Instan
 		aEndErrCh := make(chan error, 1)
 		bEndErrCh := make(chan error, 1)
 		go func() {
-			err := srcPool.MigrateInstance(src, aEnd, migration.VolumeSourceArgs{
+			err := srcPool.MigrateInstance(src, aEnd, &migration.VolumeSourceArgs{
 				Name:          src.Name(),
 				Snapshots:     snapshotNames,
 				MigrationType: migrationType,
@@ -1284,7 +1284,7 @@ func (b *lxdBackend) UpdateInstanceSnapshot(inst instance.Instance, newDesc stri
 
 // MigrateInstance sends an instance volume for migration.
 // The args.Name field is ignored and the name of the instance is used instead.
-func (b *lxdBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeSourceArgs, op *operations.Operation) error {
+func (b *lxdBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
 	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "args": args})
 	logger.Debug("MigrateInstance started")
 	defer logger.Debug("MigrateInstance finished")
@@ -2143,7 +2143,7 @@ func (b *lxdBackend) CreateCustomVolumeFromCopy(volName, desc string, config map
 	aEndErrCh := make(chan error, 1)
 	bEndErrCh := make(chan error, 1)
 	go func() {
-		err := srcPool.MigrateCustomVolume(aEnd, migration.VolumeSourceArgs{
+		err := srcPool.MigrateCustomVolume(aEnd, &migration.VolumeSourceArgs{
 			Name:          srcVolName,
 			Snapshots:     snapshotNames,
 			MigrationType: migrationType,
@@ -2188,7 +2188,7 @@ func (b *lxdBackend) CreateCustomVolumeFromCopy(volName, desc string, config map
 }
 
 // MigrateCustomVolume sends a volume for migration.
-func (b *lxdBackend) MigrateCustomVolume(conn io.ReadWriteCloser, args migration.VolumeSourceArgs, op *operations.Operation) error {
+func (b *lxdBackend) MigrateCustomVolume(conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
 	logger := logging.AddContext(b.logger, log.Ctx{"volName": args.Name, "args": args})
 	logger.Debug("MigrateCustomVolume started")
 	defer logger.Debug("MigrateCustomVolume finished")
diff --git a/lxd/storage/backend_mock.go b/lxd/storage/backend_mock.go
index 84c907353e..c2eef25bb8 100644
--- a/lxd/storage/backend_mock.go
+++ b/lxd/storage/backend_mock.go
@@ -96,7 +96,7 @@ func (b *mockBackend) UpdateInstanceBackupFile(inst instance.Instance, op *opera
 	return nil
 }
 
-func (b *mockBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeSourceArgs, op *operations.Operation) error {
+func (b *mockBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
 	return nil
 }
 
@@ -188,7 +188,7 @@ func (b *mockBackend) DeleteCustomVolume(volName string, op *operations.Operatio
 	return nil
 }
 
-func (b *mockBackend) MigrateCustomVolume(conn io.ReadWriteCloser, args migration.VolumeSourceArgs, op *operations.Operation) error {
+func (b *mockBackend) MigrateCustomVolume(conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
 	return nil
 }
 
diff --git a/lxd/storage/drivers/driver_btrfs_volumes.go b/lxd/storage/drivers/driver_btrfs_volumes.go
index 80e26bf1ee..530b4bebf4 100644
--- a/lxd/storage/drivers/driver_btrfs_volumes.go
+++ b/lxd/storage/drivers/driver_btrfs_volumes.go
@@ -474,7 +474,7 @@ func (d *btrfs) RenameVolume(vol Volume, newVolName string, op *operations.Opera
 }
 
 // MigrateVolume sends a volume for migration.
-func (d *btrfs) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error {
+func (d *btrfs) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
 	if vol.contentType != ContentTypeFS {
 		return ErrNotSupported
 	}
diff --git a/lxd/storage/drivers/driver_cephfs_volumes.go b/lxd/storage/drivers/driver_cephfs_volumes.go
index 857ea0ce96..447fec82b8 100644
--- a/lxd/storage/drivers/driver_cephfs_volumes.go
+++ b/lxd/storage/drivers/driver_cephfs_volumes.go
@@ -425,7 +425,7 @@ func (d *cephfs) RenameVolume(vol Volume, newName string, op *operations.Operati
 }
 
 // MigrateVolume streams the volume (with or without snapshots)
-func (d *cephfs) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error {
+func (d *cephfs) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
 	if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
 		return ErrNotSupported
 	}
diff --git a/lxd/storage/drivers/driver_common.go b/lxd/storage/drivers/driver_common.go
index f671f65c2c..6f8f9e88c4 100644
--- a/lxd/storage/drivers/driver_common.go
+++ b/lxd/storage/drivers/driver_common.go
@@ -230,7 +230,7 @@ func (d *common) vfsRenameVolumeSnapshot(snapVol Volume, newSnapshotName string,
 }
 
 // vfsMigrateVolume is a generic MigrateVolume implementation for VFS-only drivers.
-func (d *common) vfsMigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error {
+func (d *common) vfsMigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
 	bwlimit := d.config["rsync.bwlimit"]
 
 	for _, snapName := range volSrcArgs.Snapshots {
diff --git a/lxd/storage/drivers/driver_dir_volumes.go b/lxd/storage/drivers/driver_dir_volumes.go
index 3a1c8d92b0..6e55d1781d 100644
--- a/lxd/storage/drivers/driver_dir_volumes.go
+++ b/lxd/storage/drivers/driver_dir_volumes.go
@@ -266,7 +266,7 @@ func (d *dir) RenameVolume(vol Volume, newVolName string, op *operations.Operati
 }
 
 // MigrateVolume sends a volume for migration.
-func (d *dir) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error {
+func (d *dir) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
 	if vol.contentType != ContentTypeFS {
 		return ErrNotSupported
 	}
diff --git a/lxd/storage/drivers/interface.go b/lxd/storage/drivers/interface.go
index d05f47367d..092c97a5dd 100644
--- a/lxd/storage/drivers/interface.go
+++ b/lxd/storage/drivers/interface.go
@@ -77,7 +77,7 @@ type Driver interface {
 
 	// Migration.
 	MigrationTypes(contentType ContentType, refresh bool) []migration.Type
-	MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error
+	MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error
 	CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error
 
 	// Backup.
diff --git a/lxd/storage/pool_interface.go b/lxd/storage/pool_interface.go
index dbd6c890b2..2e1e139ecd 100644
--- a/lxd/storage/pool_interface.go
+++ b/lxd/storage/pool_interface.go
@@ -36,7 +36,7 @@ type Pool interface {
 	UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error
 	UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error
 
-	MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeSourceArgs, op *operations.Operation) error
+	MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error
 	RefreshInstance(inst instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, op *operations.Operation) error
 	BackupInstance(inst instance.Instance, targetPath string, optimized bool, snapshots bool, op *operations.Operation) error
 
@@ -81,5 +81,5 @@ type Pool interface {
 	// Custom volume migration.
 	MigrationTypes(contentType drivers.ContentType, refresh bool) []migration.Type
 	CreateCustomVolumeFromMigration(conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error
-	MigrateCustomVolume(conn io.ReadWriteCloser, args migration.VolumeSourceArgs, op *operations.Operation) error
+	MigrateCustomVolume(conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error
 }
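
The practical effect: since the same VolumeSourceArgs instance is now shared
between the initial and final sync calls (see the previous patch), anything a
driver stores in args.Data during the first pass is still there for the final
pass. Abridged from the migrate_container.go flow above:

	volSourceArgs := &migration.VolumeSourceArgs{}
	volSourceArgs.Name = s.instance.Name()
	volSourceArgs.MultiSync = true

	// First pass; the driver may stash state in volSourceArgs.Data.
	pool.MigrateInstance(s.instance, conn, volSourceArgs, migrateOp)

	// Final pass; the driver can read volSourceArgs.Data back.
	volSourceArgs.FinalSync = true
	pool.MigrateInstance(s.instance, conn, volSourceArgs, migrateOp)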

From 2d4b7d7e9b77f6433ad86b55af3ef5be6f61f691 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Sun, 3 Nov 2019 19:36:44 -0500
Subject: [PATCH 6/7] lxd/storage: Port "zfs" to new driver logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/storage/drivers/driver_zfs.go         |  416 ++++++
 lxd/storage/drivers/driver_zfs_utils.go   |  310 +++++
 lxd/storage/drivers/driver_zfs_volumes.go | 1395 +++++++++++++++++++++
 lxd/storage/drivers/load.go               |    1 +
 4 files changed, 2122 insertions(+)
 create mode 100644 lxd/storage/drivers/driver_zfs.go
 create mode 100644 lxd/storage/drivers/driver_zfs_utils.go
 create mode 100644 lxd/storage/drivers/driver_zfs_volumes.go

diff --git a/lxd/storage/drivers/driver_zfs.go b/lxd/storage/drivers/driver_zfs.go
new file mode 100644
index 0000000000..11f1774213
--- /dev/null
+++ b/lxd/storage/drivers/driver_zfs.go
@@ -0,0 +1,416 @@
+package drivers
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operations"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/units"
+)
+
+var zfsVersion string
+var zfsLoaded bool
+
+var zfsDefaultSettings = map[string]string{
+	"mountpoint": "none",
+	"setuid":     "on",
+	"exec":       "on",
+	"devices":    "on",
+	"acltype":    "posixacl",
+	"xattr":      "sa",
+}
+
+type zfs struct {
+	common
+}
+
+// load is used to run a one-time action per driver rather than per pool.
+func (d *zfs) load() error {
+	if zfsLoaded {
+		return nil
+	}
+
+	// Load the kernel module.
+	util.LoadModule("zfs")
+
+	// Validate the needed tools are present.
+	for _, tool := range []string{"zpool", "zfs"} {
+		_, err := exec.LookPath(tool)
+		if err != nil {
+			return fmt.Errorf("Required tool '%s' is missing", tool)
+		}
+	}
+
+	// Get the version information.
+	if zfsVersion == "" {
+		version, err := d.version()
+		if err != nil {
+			return err
+		}
+
+		zfsVersion = version
+	}
+
+	zfsLoaded = true
+	return nil
+}
+
+// Info returns info about the driver and its environment.
+func (d *zfs) Info() Info {
+	info := Info{
+		Name:                  "zfs",
+		Version:               zfsVersion,
+		OptimizedImages:       true,
+		PreservesInodes:       true,
+		Remote:                false,
+		VolumeTypes:           []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},
+		BlockBacking:          false,
+		RunningQuotaResize:    true,
+		RunningSnapshotFreeze: false,
+	}
+
+	return info
+}
+
+// Create is called during pool creation and is effectively using an empty driver struct.
+// WARNING: The Create() function cannot rely on any of the struct attributes being set.
+func (d *zfs) Create() error {
+	// Store the provided source as we are likely to be mangling it.
+	d.config["volatile.initial_source"] = d.config["source"]
+
+	loopPath := filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", d.name))
+	if d.config["source"] == "" || d.config["source"] == loopPath {
+		// Create a loop based pool.
+		d.config["source"] = loopPath
+
+		// Set default pool_name.
+		if d.config["zfs.pool_name"] == "" {
+			d.config["zfs.pool_name"] = d.name
+		}
+
+		// Validate pool_name.
+		if strings.Contains(d.config["zfs.pool_name"], "/") {
+			return fmt.Errorf("zfs.pool_name can't point to a dataset when source isn't set")
+		}
+
+		// Create the loop file itself.
+		size, err := units.ParseByteSizeString(d.config["size"])
+		if err != nil {
+			return err
+		}
+
+		err = createSparseFile(loopPath, size)
+		if err != nil {
+			return err
+		}
+
+		// Create the zpool.
+		_, err = shared.RunCommand("zpool", "create", "-f", "-m", "none", "-O", "compression=on", d.config["zfs.pool_name"], loopPath)
+		if err != nil {
+			return err
+		}
+	} else if filepath.IsAbs(d.config["source"]) {
+		// Handle existing block devices.
+		if !shared.IsBlockdevPath(d.config["source"]) {
+			return fmt.Errorf("Custom loop file locations are not supported")
+		}
+
+		// Unset size property since it's irrelevant.
+		d.config["size"] = ""
+
+		// Set default pool_name.
+		if d.config["zfs.pool_name"] == "" {
+			d.config["zfs.pool_name"] = d.name
+		}
+
+		// Validate pool_name.
+		if strings.Contains(d.config["zfs.pool_name"], "/") {
+			return fmt.Errorf("zfs.pool_name can't point to a dataset when source isn't set")
+		}
+
+		// Create the zpool.
+		_, err := shared.RunCommand("zpool", "create", "-f", "-m", "none", "-O", "compression=on", d.config["zfs.pool_name"], d.config["source"])
+		if err != nil {
+			return err
+		}
+
+		// We don't need to keep the original source path around for import.
+		d.config["source"] = d.config["zfs.pool_name"]
+	} else {
+		// Handle an existing zpool.
+		if d.config["zfs.pool_name"] == "" {
+			d.config["zfs.pool_name"] = d.config["source"]
+		}
+
+		// Unset size property since it's irrelevant.
+		d.config["size"] = ""
+
+		// Validate pool_name.
+		if d.config["zfs.pool_name"] != d.config["source"] {
+			return fmt.Errorf("The source must match zfs.pool_name if specified")
+		}
+
+		if strings.Contains(d.config["zfs.pool_name"], "/") {
+			// Handle a dataset.
+			if !d.checkDataset(d.config["zfs.pool_name"]) {
+				err := d.createDataset(d.config["zfs.pool_name"], "mountpoint=none")
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			// Ensure that the pool is available.
+			_, err := d.Mount()
+			if err != nil {
+				return err
+			}
+		}
+
+		// Confirm that the existing pool/dataset is all empty.
+		datasets, err := d.getDatasets(d.config["zfs.pool_name"])
+		if err != nil {
+			return err
+		}
+
+		if len(datasets) > 0 {
+			return fmt.Errorf("Provided ZFS pool (or dataset) isn't empty")
+		}
+	}
+
+	// Setup revert in case of problems
+	revertPool := true
+	defer func() {
+		if !revertPool {
+			return
+		}
+
+		d.Delete(nil)
+	}()
+
+	// Apply our default configuration.
+	args := []string{}
+	for k, v := range zfsDefaultSettings {
+		args = append(args, fmt.Sprintf("%s=%s", k, v))
+	}
+
+	err := d.setDatasetProperties(d.config["zfs.pool_name"], args...)
+	if err != nil {
+		return err
+	}
+
+	// Create the initial datasets.
+	for _, dataset := range d.initialDatasets() {
+		err := d.createDataset(filepath.Join(d.config["zfs.pool_name"], dataset), "mountpoint=none")
+		if err != nil {
+			return err
+		}
+	}
+
+	revertPool = false
+	return nil
+}
+
+// Delete removes the storage pool from the storage device.
+func (d *zfs) Delete(op *operations.Operation) error {
+	// Check if the dataset/pool is already gone.
+	if !d.checkDataset(d.config["zfs.pool_name"]) {
+		return nil
+	}
+
+	// Confirm that nothing's been left behind
+	datasets, err := d.getDatasets(d.config["zfs.pool_name"])
+	if err != nil {
+		return err
+	}
+
+	initialDatasets := d.initialDatasets()
+	for _, dataset := range datasets {
+		if shared.StringInSlice(dataset, initialDatasets) {
+			continue
+		}
+
+		fields := strings.Split(dataset, "/")
+		if len(fields) > 1 {
+			return fmt.Errorf("ZFS pool has leftover datasets: %s", dataset)
+		}
+	}
+
+	if strings.Contains(d.config["zfs.pool_name"], "/") {
+		// Delete the dataset.
+		_, err := shared.RunCommand("zfs", "destroy", "-r", d.config["zfs.pool_name"])
+		if err != nil {
+			return err
+		}
+	} else {
+		// Delete the pool.
+		_, err := shared.RunCommand("zpool", "destroy", d.config["zfs.pool_name"])
+		if err != nil {
+			return err
+		}
+	}
+
+	// On delete, wipe everything in the directory.
+	err = wipeDirectory(GetPoolMountPath(d.name))
+	if err != nil {
+		return err
+	}
+
+	// Delete any loop file we may have used
+	loopPath := filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", d.name))
+	if shared.PathExists(loopPath) {
+		err = os.Remove(loopPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Validate checks that all provided keys are supported and that no conflicting or missing configuration is present.
+func (d *zfs) Validate(config map[string]string) error {
+	return nil
+}
+
+// Update applies any driver changes required from a configuration change.
+func (d *zfs) Update(changedConfig map[string]string) error {
+	_, ok := changedConfig["zfs.pool_name"]
+	if ok {
+		return fmt.Errorf("zfs.pool_name cannot be modified")
+	}
+
+	return nil
+}
+
+// Mount mounts the storage pool.
+func (d *zfs) Mount() (bool, error) {
+	// Check if already setup.
+	if d.checkDataset(d.config["zfs.pool_name"]) {
+		return false, nil
+	}
+
+	// Check if the pool exists.
+	poolName := strings.Split(d.config["zfs.pool_name"], "/")[0]
+	if d.checkDataset(poolName) {
+		return false, fmt.Errorf("ZFS zpool exists but dataset is missing")
+	}
+
+	// Import the pool.
+	if filepath.IsAbs(d.config["source"]) {
+		disksPath := shared.VarPath("disks")
+		_, err := shared.RunCommand("zpool", "import", "-f", "-d", disksPath, poolName)
+		if err != nil {
+			return false, err
+		}
+	} else {
+		_, err := shared.RunCommand("zpool", "import", poolName)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	// Check that the dataset now exists.
+	if d.checkDataset(d.config["zfs.pool_name"]) {
+		return true, nil
+	}
+
+	return false, fmt.Errorf("ZFS zpool exists but dataset is missing")
+}
+
+// Unmount unmounts the storage pool.
+func (d *zfs) Unmount() (bool, error) {
+	// Skip if using a dataset and not a full pool.
+	if strings.Contains(d.config["zfs.pool_name"], "/") {
+		return false, nil
+	}
+
+	// Check if already unmounted.
+	if !d.checkDataset(d.config["zfs.pool_name"]) {
+		return false, nil
+	}
+
+	// Export the pool.
+	poolName := strings.Split(d.config["zfs.pool_name"], "/")[0]
+	_, err := shared.RunCommand("zpool", "export", poolName)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+func (d *zfs) GetResources() (*api.ResourcesStoragePool, error) {
+	// Get the total amount of space.
+	totalStr, err := d.getDatasetProperty(d.config["zfs.pool_name"], "available")
+	if err != nil {
+		return nil, err
+	}
+
+	total, err := strconv.ParseUint(strings.TrimSpace(totalStr), 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the used amount of space.
+	usedStr, err := d.getDatasetProperty(d.config["zfs.pool_name"], "used")
+	if err != nil {
+		return nil, err
+	}
+
+	used, err := strconv.ParseUint(strings.TrimSpace(usedStr), 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the struct.
+	// Inode allocation is dynamic so there's no point in reporting it.
+	res := api.ResourcesStoragePool{}
+	res.Space.Total = total
+	res.Space.Used = used
+
+	return &res, nil
+}
+
+// MigrationTypes returns the types of transfer methods to be used when doing migrations between pools, in preference order.
+func (d *zfs) MigrationTypes(contentType ContentType, refresh bool) []migration.Type {
+	if contentType != ContentTypeFS {
+		return nil
+	}
+
+	// When performing a refresh, always use rsync. Using zfs send/receive
+	// here doesn't make sense since it would need to send everything again
+	// which defeats the purpose of a refresh.
+	if refresh {
+		return []migration.Type{
+			{
+				FSType:   migration.MigrationFSType_RSYNC,
+				Features: []string{"xattrs", "delete", "compress", "bidirectional"},
+			},
+		}
+	}
+
+	// Detect ZFS features.
+	features := []string{}
+	if len(zfsVersion) >= 3 && zfsVersion[0:3] != "0.6" {
+		features = append(features, "compress")
+	}
+
+	return []migration.Type{
+		{
+			FSType:   migration.MigrationFSType_ZFS,
+			Features: features,
+		},
+		{
+			FSType:   migration.MigrationFSType_RSYNC,
+			Features: []string{"xattrs", "delete", "compress", "bidirectional"},
+		},
+	}
+}
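
Since the returned slice is in preference order, the negotiation via
migration.MatchTypes (as called from migrate_container.go earlier in this
series) picks zfs send/receive whenever both ends support it and otherwise
falls back to rsync. Roughly, on the source side:

	// Sketch based on the call sites in this series; respHeader comes from the target.
	migrationType, err := migration.MatchTypes(respHeader, migration.MigrationFSType_RSYNC, poolMigrationTypes)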
diff --git a/lxd/storage/drivers/driver_zfs_utils.go b/lxd/storage/drivers/driver_zfs_utils.go
new file mode 100644
index 0000000000..a5ac0ea914
--- /dev/null
+++ b/lxd/storage/drivers/driver_zfs_utils.go
@@ -0,0 +1,310 @@
+package drivers
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/pborman/uuid"
+
+	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/ioprogress"
+)
+
+func (d *zfs) dataset(vol Volume, deleted bool) string {
+	name, snapName, _ := shared.InstanceGetParentAndSnapshotName(vol.name)
+	if (vol.volType == VolumeTypeVM || vol.volType == VolumeTypeImage) && vol.contentType == ContentTypeBlock {
+		name = fmt.Sprintf("%s.block", name)
+	}
+
+	if snapName != "" {
+		if deleted {
+			name = fmt.Sprintf("%s@deleted-%s", name, uuid.NewRandom().String())
+		} else {
+			name = fmt.Sprintf("%s@snapshot-%s", name, snapName)
+		}
+	} else if deleted {
+		if vol.volType != VolumeTypeImage {
+			name = uuid.NewRandom().String()
+		}
+
+		return filepath.Join(d.config["zfs.pool_name"], "deleted", string(vol.volType), name)
+	}
+
+	return filepath.Join(d.config["zfs.pool_name"], string(vol.volType), name)
+}
+
+func (d *zfs) createDataset(dataset string, options ...string) error {
+	args := []string{"create"}
+	for _, option := range options {
+		args = append(args, "-o")
+		args = append(args, option)
+	}
+	args = append(args, dataset)
+
+	_, err := shared.RunCommand("zfs", args...)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (d *zfs) createVolume(dataset string, size int64, options ...string) error {
+	size = (size / 8192) * 8192
+
+	args := []string{"create", "-s", "-V", fmt.Sprintf("%d", size)}
+	for _, option := range options {
+		args = append(args, "-o")
+		args = append(args, option)
+	}
+	args = append(args, dataset)
+
+	_, err := shared.RunCommand("zfs", args...)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (d *zfs) checkDataset(dataset string) bool {
+	out, err := shared.RunCommand("zfs", "get", "-H", "-o", "name", "name", dataset)
+	if err != nil {
+		return false
+	}
+
+	return strings.TrimSpace(out) == dataset
+}
+
+func (d *zfs) checkVMBlock(vol Volume) bool {
+	return (vol.volType == VolumeTypeVM || vol.volType == VolumeTypeImage) && vol.contentType == ContentTypeBlock
+}
+
+func (d *zfs) getClones(dataset string) ([]string, error) {
+	out, err := shared.RunCommand("zfs", "get", "-H", "-p", "-o", "value", "-r", "clones", dataset)
+	if err != nil {
+		return nil, err
+	}
+
+	clones := []string{}
+	for _, line := range strings.Split(out, "\n") {
+		line = strings.TrimSpace(line)
+		if line == dataset || line == "" || line == "-" {
+			continue
+		}
+
+		line = strings.TrimPrefix(line, fmt.Sprintf("%s/", dataset))
+		clones = append(clones, line)
+	}
+
+	return clones, nil
+}
+
+func (d *zfs) getDatasets(dataset string) ([]string, error) {
+	out, err := shared.RunCommand("zfs", "get", "name", "-o", "name", "-H", "-r", dataset)
+	if err != nil {
+		return nil, err
+	}
+
+	children := []string{}
+	for _, line := range strings.Split(out, "\n") {
+		line = strings.TrimSpace(line)
+		if line == dataset || line == "" {
+			continue
+		}
+
+		line = strings.TrimPrefix(line, dataset)
+		line = strings.TrimPrefix(line, "/")
+		children = append(children, line)
+	}
+
+	return children, nil
+}
+
+func (d *zfs) setDatasetProperties(dataset string, options ...string) error {
+	args := []string{"set"}
+	args = append(args, options...)
+	args = append(args, dataset)
+
+	_, err := shared.RunCommand("zfs", args...)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (d *zfs) getDatasetProperty(dataset string, key string) (string, error) {
+	output, err := shared.RunCommand("zfs", "get", "-H", "-p", "-o", "value", key, dataset)
+	if err != nil {
+		return "", err
+	}
+
+	return strings.TrimSpace(output), nil
+}
+
+// version returns the ZFS version based on package or kernel module version.
+func (d *zfs) version() (string, error) {
+	// This function is only really ever relevant on Ubuntu as the only
+	// This function is only really relevant on Ubuntu, the only distro
+	// that ships out-of-sync tools and kernel modules.
+	if err == nil {
+		return strings.TrimSpace(string(out)), nil
+	}
+
+	// Loaded kernel module version
+	if shared.PathExists("/sys/module/zfs/version") {
+		out, err := ioutil.ReadFile("/sys/module/zfs/version")
+		if err == nil {
+			return strings.TrimSpace(string(out)), nil
+		}
+	}
+
+	// Module information version
+	out, err = shared.RunCommand("modinfo", "-F", "version", "zfs")
+	if err == nil {
+		return strings.TrimSpace(string(out)), nil
+	}
+
+	return "", fmt.Errorf("Could not determine ZFS module version")
+}
+
+// initialDatasets returns the list of all expected datasets.
+func (d *zfs) initialDatasets() []string {
+	entries := []string{"deleted"}
+
+	// Iterate over the listed supported volume types.
+	for _, volType := range d.Info().VolumeTypes {
+		entries = append(entries, BaseDirectories[volType][0])
+		entries = append(entries, filepath.Join("deleted", BaseDirectories[volType][0]))
+	}
+
+	return entries
+}
+
+func (d *zfs) sendDataset(dataset string, parent string, volSrcArgs *migration.VolumeSourceArgs, conn io.ReadWriteCloser, tracker *ioprogress.ProgressTracker) error {
+	// Assemble zfs send command.
+	args := []string{"send"}
+	if shared.StringInSlice("compress", volSrcArgs.MigrationType.Features) {
+		args = append(args, "-c")
+		args = append(args, "-L")
+	}
+	if parent != "" {
+		args = append(args, "-i", parent)
+	}
+	args = append(args, dataset)
+	cmd := exec.Command("zfs", args...)
+
+	// Prepare stdout/stderr.
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return err
+	}
+
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return err
+	}
+
+	// Setup progress tracker.
+	stdoutPipe := stdout
+	if tracker != nil {
+		stdoutPipe = &ioprogress.ProgressReader{
+			ReadCloser: stdout,
+			Tracker:    tracker,
+		}
+	}
+
+	// Forward any output on stdout.
+	chStdoutPipe := make(chan error, 1)
+	go func() {
+		_, err := io.Copy(conn, stdoutPipe)
+		chStdoutPipe <- err
+		conn.Close()
+	}()
+
+	// Run the command.
+	err = cmd.Start()
+	if err != nil {
+		return err
+	}
+
+	// Read any error.
+	output, _ := ioutil.ReadAll(stderr)
+
+	// Handle errors.
+	errs := []error{}
+	chStdoutPipeErr := <-chStdoutPipe
+
+	err = cmd.Wait()
+	if err != nil {
+		errs = append(errs, err)
+
+		if chStdoutPipeErr != nil {
+			errs = append(errs, chStdoutPipeErr)
+		}
+	}
+
+	if len(errs) > 0 {
+		return fmt.Errorf("zfs send failed: %v (%s)", errs, string(output))
+	}
+
+	return nil
+}
+
+func (d *zfs) receiveDataset(dataset string, conn io.ReadWriteCloser, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
+	// Assemble zfs receive command.
+	cmd := exec.Command("zfs", "receive", "-F", "-u", dataset)
+
+	// Prepare stdin/stderr.
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return err
+	}
+
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return err
+	}
+
+	// Forward input through stdin.
+	chCopyConn := make(chan error, 1)
+	go func() {
+		_, err = io.Copy(stdin, conn)
+		stdin.Close()
+		chCopyConn <- err
+	}()
+
+	// Run the command.
+	err = cmd.Start()
+	if err != nil {
+		return err
+	}
+
+	// Read any error.
+	output, _ := ioutil.ReadAll(stderr)
+
+	// Handle errors.
+	errs := []error{}
+	chCopyConnErr := <-chCopyConn
+
+	err = cmd.Wait()
+	if err != nil {
+		errs = append(errs, err)
+
+		if chCopyConnErr != nil {
+			errs = append(errs, chCopyConnErr)
+		}
+	}
+
+	if len(errs) > 0 {
+		return fmt.Errorf("Problem with zfs receive: (%v) %s", errs, string(output))
+	}
+
+	return nil
+}
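
The two helpers above are the two ends of a single stream. A minimal sketch
of how they pair up over a migration connection (dataset names are
illustrative):

	// Source side: pipe the "zfs send" output into the connection.
	err := d.sendDataset("tank/containers/c1@snapshot-snap0", "", volSrcArgs, conn, nil)

	// Target side: feed the connection into "zfs receive".
	err = d.receiveDataset("tank/containers/c1", conn, nil)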
diff --git a/lxd/storage/drivers/driver_zfs_volumes.go b/lxd/storage/drivers/driver_zfs_volumes.go
new file mode 100644
index 0000000000..6063f6dbeb
--- /dev/null
+++ b/lxd/storage/drivers/driver_zfs_volumes.go
@@ -0,0 +1,1395 @@
+package drivers
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pborman/uuid"
+	"golang.org/x/sys/unix"
+
+	"github.com/lxc/lxd/lxd/migration"
+	"github.com/lxc/lxd/lxd/operations"
+	"github.com/lxc/lxd/lxd/revert"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/ioprogress"
+	"github.com/lxc/lxd/shared/units"
+)
+
+// CreateVolume creates an empty volume and can optionally fill it by executing the supplied
+// filler function.
+func (d *zfs) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error {
+	// Revert handling
+	revert := revert.New()
+	defer revert.Fail()
+
+	if vol.contentType == ContentTypeFS {
+		// Create mountpoint.
+		err := vol.EnsureMountPath()
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() { os.Remove(vol.MountPath()) })
+	}
+
+	// Look for previously deleted images.
+	if vol.volType == VolumeTypeImage && d.checkDataset(d.dataset(vol, true)) {
+		// Restore the image.
+		_, err := shared.RunCommand("/proc/self/exe", "forkzfs", "--", "rename", d.dataset(vol, true), d.dataset(vol, false))
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// After this point we'll have a volume, so setup revert.
+	revert.Add(func() { d.DeleteVolume(vol, op) })
+
+	if vol.contentType == ContentTypeFS {
+		// Create the filesystem dataset.
+		err := d.createDataset(d.dataset(vol, false), fmt.Sprintf("mountpoint=%s", vol.MountPath()), "canmount=noauto")
+		if err != nil {
+			return err
+		}
+
+		// Apply the size limit.
+		size := vol.ExpandedConfig("size")
+		if size != "" {
+			err := d.SetVolumeQuota(vol, size, op)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		// Convert the size.
+		size := vol.ExpandedConfig("size")
+		if size == "" {
+			size = defaultBlockSize
+		}
+
+		sizeBytes, err := units.ParseByteSizeString(size)
+		if err != nil {
+			return err
+		}
+
+		loopPath := filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", d.name))
+		if d.config["source"] == loopPath {
+			// Create the volume dataset with sync disabled (to avoid kernel lockups when using a disk based pool).
+			err = d.createVolume(d.dataset(vol, false), sizeBytes, "sync=disabled")
+			if err != nil {
+				return err
+			}
+		} else {
+			// Create the volume dataset.
+			err = d.createVolume(d.dataset(vol, false), sizeBytes)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// For VM images, create a filesystem volume too.
+	if d.checkVMBlock(vol) {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		err := d.CreateVolume(fsVol, nil, op)
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() { d.DeleteVolume(fsVol, op) })
+	}
+
+	// Mount the dataset.
+	_, err := d.MountVolume(vol, op)
+	if err != nil {
+		return err
+	}
+
+	if vol.contentType == ContentTypeFS {
+		// Set the permissions.
+		err = vol.EnsureMountPath()
+		if err != nil {
+			d.UnmountVolume(vol, op)
+			return err
+		}
+	}
+
+	// Run the volume filler function if supplied.
+	if filler != nil && filler.Fill != nil {
+		if vol.contentType == ContentTypeFS {
+			// Run the filler.
+			err = filler.Fill(vol.MountPath(), "")
+			if err != nil {
+				d.UnmountVolume(vol, op)
+				return err
+			}
+		} else {
+			// Get the device path.
+			devPath, err := d.GetVolumeDiskPath(vol)
+			if err != nil {
+				d.UnmountVolume(vol, op)
+				return err
+			}
+
+			// Run the filler.
+			err = filler.Fill(vol.MountPath(), devPath)
+			if err != nil {
+				d.UnmountVolume(vol, op)
+				return err
+			}
+		}
+	}
+
+	// Unmount the volume.
+	_, err = d.UnmountVolume(vol, op)
+	if err != nil {
+		return err
+	}
+
+	// Setup snapshot and unset mountpoint on image.
+	if vol.volType == VolumeTypeImage {
+		// Create snapshot of the main dataset.
+		_, err := shared.RunCommand("zfs", "snapshot", fmt.Sprintf("%s@readonly", d.dataset(vol, false)))
+		if err != nil {
+			return err
+		}
+
+		if vol.contentType == ContentTypeBlock {
+			// Re-create the readonly snapshot, post-filling.
+			fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+
+			_, err := shared.RunCommand("zfs", "destroy", fmt.Sprintf("%s@readonly", d.dataset(fsVol, false)))
+			if err != nil {
+				return err
+			}
+
+			_, err = shared.RunCommand("zfs", "snapshot", fmt.Sprintf("%s@readonly", d.dataset(fsVol, false)))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// All done.
+	revert.Success()
+
+	return nil
+}
+
+// CreateVolumeFromBackup re-creates a volume from its exported state.
+func (d *zfs) CreateVolumeFromBackup(vol Volume, snapshots []string, srcData io.ReadSeeker, optimized bool, op *operations.Operation) (func(vol Volume) error, func(), error) {
+	// Handle the non-optimized tarballs through the generic unpacker.
+	if !optimized {
+		return genericBackupUnpack(d, vol, snapshots, srcData, op)
+	}
+
+	revert := revert.New()
+	defer revert.Fail()
+
+	// Define a revert function that is used to revert if an error occurs inside this
+	// function, and that is also returned to the calling functions for use if no error occurs internally.
+	revertHook := func() {
+		for _, snapName := range snapshots {
+			fullSnapshotName := GetSnapshotVolumeName(vol.name, snapName)
+			snapVol := NewVolume(d, d.name, vol.volType, vol.contentType, fullSnapshotName, vol.config, vol.poolConfig)
+			d.DeleteVolumeSnapshot(snapVol, op)
+		}
+
+		// And lastly the main volume.
+		d.DeleteVolume(vol, op)
+	}
+
+	// Only execute the revert function if we have had an error internally.
+	revert.Add(revertHook)
+
+	// Create a temporary directory to unpack the backup into.
+	unpackDir, err := ioutil.TempDir(GetVolumeMountPath(d.name, vol.volType, ""), vol.name)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer os.RemoveAll(unpackDir)
+
+	err = os.Chmod(unpackDir, 0100)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Find the compression algorithm used for backup source data.
+	srcData.Seek(0, 0)
+	tarArgs, _, _, err := shared.DetectCompressionFile(srcData)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Prepare tar arguments.
+	args := append(tarArgs, []string{
+		"-",
+		"--strip-components=1",
+		"-C", unpackDir, "backup",
+	}...)
+
+	// Unpack the backup.
+	srcData.Seek(0, 0)
+	err = shared.RunCommandWithFds(srcData, nil, "tar", args...)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(snapshots) > 0 {
+		// Create new snapshots directory.
+		err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Restore backups from oldest to newest.
+	for _, snapName := range snapshots {
+		// Open the backup.
+		feeder, err := os.Open(filepath.Join(unpackDir, "snapshots", fmt.Sprintf("%s.bin", snapName)))
+		if err != nil {
+			return nil, nil, err
+		}
+		defer feeder.Close()
+
+		// Extract the backup.
+		dstSnapshot := fmt.Sprintf("%s@snapshot-%s", d.dataset(vol, false), snapName)
+		err = shared.RunCommandWithFds(feeder, nil, "zfs", "receive", "-F", dstSnapshot)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Open the backup.
+	feeder, err := os.Open(filepath.Join(unpackDir, "container.bin"))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer feeder.Close()
+
+	// Extract the backup.
+	err = shared.RunCommandWithFds(feeder, nil, "zfs", "receive", "-F", d.dataset(vol, false))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Strip internal snapshots.
+	entries, err := d.getDatasets(d.dataset(vol, false))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Keep the "@snapshot-" prefixed snapshots and remove any other snapshot entries.
+	for _, entry := range entries {
+		if strings.HasPrefix(entry, "@snapshot-") {
+			continue
+		}
+
+		if strings.HasPrefix(entry, "@") {
+			_, err := shared.RunCommand("zfs", "destroy", fmt.Sprintf("%s%s", d.dataset(vol, false), entry))
+			if err != nil {
+				return nil, nil, err
+			}
+		}
+	}
+
+	// Re-apply the base mount options.
+	if vol.contentType == ContentTypeFS {
+		err := d.setDatasetProperties(d.dataset(vol, false), fmt.Sprintf("mountpoint=%s", vol.MountPath()), "canmount=noauto")
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// The import requires a mounted volume, so mount it and have it unmounted as a post hook.
+	_, err = d.MountVolume(vol, op)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	postHook := func(vol Volume) error {
+		_, err := d.UnmountVolume(vol, op)
+		return err
+	}
+
+	revert.Success()
+	return postHook, revertHook, nil
+}
+
+// CreateVolumeFromCopy provides same-pool volume copying functionality.
+func (d *zfs) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bool, op *operations.Operation) error {
+	// Revert handling
+	revert := revert.New()
+	defer revert.Fail()
+
+	if vol.contentType == ContentTypeFS {
+		// Create mountpoint.
+		err := vol.EnsureMountPath()
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() { os.Remove(vol.MountPath()) })
+	}
+
+	// For VMs, also copy the filesystem dataset.
+	if vol.volType == VolumeTypeVM && vol.contentType == ContentTypeBlock {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		fsSrcVol := NewVolume(d, d.name, srcVol.volType, ContentTypeFS, srcVol.name, srcVol.config, srcVol.poolConfig)
+
+		err := d.CreateVolumeFromCopy(fsVol, fsSrcVol, copySnapshots, op)
+		if err != nil {
+			return err
+		}
+
+		// Delete on revert.
+		revert.Add(func() {
+			d.DeleteVolume(fsVol, op)
+		})
+	}
+
+	// Retrieve snapshots on the source.
+	snapshots := []string{}
+	if !srcVol.IsSnapshot() && copySnapshots {
+		var err error
+		snapshots, err = d.VolumeSnapshots(srcVol, op)
+		if err != nil {
+			return err
+		}
+	}
+
+	var srcSnapshot string
+	if srcVol.volType == VolumeTypeImage {
+		srcSnapshot = fmt.Sprintf("%s@readonly", d.dataset(srcVol, false))
+	} else if srcVol.IsSnapshot() {
+		srcSnapshot = d.dataset(srcVol, false)
+	} else {
+		// Create a new snapshot for copy.
+		srcSnapshot = fmt.Sprintf("%s@copy-%s", d.dataset(srcVol, false), uuid.NewRandom().String())
+
+		_, err := shared.RunCommand("zfs", "snapshot", srcSnapshot)
+		if err != nil {
+			return err
+		}
+
+		// When zfs.clone_copy is disabled or snapshots are involved, delete the snapshot at the end.
+		if (d.config["zfs.clone_copy"] != "" && !shared.IsTrue(d.config["zfs.clone_copy"])) || len(snapshots) > 0 {
+			// Delete the snapshot at the end.
+			defer shared.RunCommand("zfs", "destroy", srcSnapshot)
+		} else {
+			// Delete the snapshot on revert.
+			revert.Add(func() {
+				shared.RunCommand("zfs", "destroy", srcSnapshot)
+			})
+		}
+	}
+
+	// Handle zfs.clone_copy
+	if (d.config["zfs.clone_copy"] != "" && !shared.IsTrue(d.config["zfs.clone_copy"])) || len(snapshots) > 0 {
+		snapName := strings.SplitN(srcSnapshot, "@", 2)[1]
+
+		// Send/receive the snapshot.
+		var sender *exec.Cmd
+		receiver := exec.Command("zfs", "receive", d.dataset(vol, false))
+
+		// Handle transferring snapshots.
+		if len(snapshots) > 0 {
+			sender = exec.Command("zfs", "send", "-R", srcSnapshot)
+		} else {
+			sender = exec.Command("zfs", "send", srcSnapshot)
+		}
+
+		// Configure the pipes.
+		receiver.Stdin, _ = sender.StdoutPipe()
+		receiver.Stdout = os.Stdout
+		receiver.Stderr = os.Stderr
+
+		// Run the transfer.
+		err := receiver.Start()
+		if err != nil {
+			return err
+		}
+
+		err = sender.Run()
+		if err != nil {
+			return err
+		}
+
+		err = receiver.Wait()
+		if err != nil {
+			return err
+		}
+
+		// Delete the snapshot.
+		_, err = shared.RunCommand("zfs", "destroy", fmt.Sprintf("%s@%s", d.dataset(vol, false), snapName))
+		if err != nil {
+			return err
+		}
+
+		// Cleanup unexpected snapshots.
+		if len(snapshots) > 0 {
+			children, err := d.getDatasets(d.dataset(vol, false))
+			if err != nil {
+				return err
+			}
+
+			for _, entry := range children {
+				// Check if expected snapshot.
+				if strings.HasPrefix(entry, "@snapshot-") {
+					name := strings.TrimPrefix(entry, "@snapshot-")
+					if shared.StringInSlice(name, snapshots) {
+						continue
+					}
+				}
+
+				// Delete the rest.
+				_, err := shared.RunCommand("zfs", "destroy", fmt.Sprintf("%s%s", d.dataset(vol, false), entry))
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else {
+		// Clone the snapshot.
+		_, err := shared.RunCommand("zfs", "clone", srcSnapshot, d.dataset(vol, false))
+		if err != nil {
+			return err
+		}
+	}
+
+	// Apply the properties.
+	if vol.contentType == ContentTypeFS {
+		err := d.setDatasetProperties(d.dataset(vol, false), fmt.Sprintf("mountpoint=%s", vol.MountPath()), "canmount=noauto")
+		if err != nil {
+			return err
+		}
+	}
+
+	// All done.
+	revert.Success()
+
+	return nil
+}
+
+// CreateVolumeFromMigration creates a volume being sent via a migration.
+func (d *zfs) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, filler *VolumeFiller, op *operations.Operation) error {
+	if vol.contentType != ContentTypeFS {
+		return ErrNotSupported
+	}
+
+	// Handle simple rsync through generic.
+	if volTargetArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC {
+		return genericCreateVolumeFromMigration(d, nil, vol, conn, volTargetArgs, filler, op)
+	} else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_ZFS {
+		return ErrNotSupported
+	}
+
+	// Handle zfs send/receive migration.
+	if len(volTargetArgs.Snapshots) > 0 {
+		// Create the parent directory.
+		err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
+		if err != nil {
+			return err
+		}
+
+		// Transfer the snapshots.
+		for _, snapName := range volTargetArgs.Snapshots {
+			fullSnapshotName := GetSnapshotVolumeName(vol.name, snapName)
+			wrapper := migration.ProgressWriter(op, "fs_progress", fullSnapshotName)
+
+			err = d.receiveDataset(d.dataset(vol, false), conn, wrapper)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Transfer the main volume.
+	wrapper := migration.ProgressWriter(op, "fs_progress", vol.name)
+	err := d.receiveDataset(d.dataset(vol, false), conn, wrapper)
+	if err != nil {
+		return err
+	}
+
+	// Strip internal snapshots.
+	entries, err := d.getDatasets(d.dataset(vol, false))
+	if err != nil {
+		return err
+	}
+
+	// Keep the LXD-managed ("snapshot-" prefixed) snapshots and destroy any other snapshot.
+	for _, entry := range entries {
+		if strings.HasPrefix(entry, "@snapshot-") {
+			continue
+		}
+
+		if strings.HasPrefix(entry, "@") {
+			_, err := shared.RunCommand("zfs", "destroy", fmt.Sprintf("%s%s", d.dataset(vol, false), entry))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if vol.contentType == ContentTypeFS {
+		// Create mountpoint.
+		err := vol.EnsureMountPath()
+		if err != nil {
+			return err
+		}
+
+		// Re-apply the base mount options.
+		err = d.setDatasetProperties(d.dataset(vol, false), fmt.Sprintf("mountpoint=%s", vol.MountPath()), "canmount=noauto")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RefreshVolume updates an existing volume to match the state of another.
+func (d *zfs) RefreshVolume(vol Volume, srcVol Volume, srcSnapshots []Volume, op *operations.Operation) error {
+	return genericCopyVolume(d, nil, vol, srcVol, srcSnapshots, true, op)
+}
+
+// DeleteVolume deletes a volume of the storage device. If any snapshots of the volume remain then
+// this function will return an error.
+func (d *zfs) DeleteVolume(vol Volume, op *operations.Operation) error {
+	// Check that we have a dataset to delete.
+	if d.checkDataset(d.dataset(vol, false)) {
+		// Handle clones.
+		clones, err := d.getClones(d.dataset(vol, false))
+		if err != nil {
+			return err
+		}
+
+		if len(clones) > 0 {
+			// Move to the deleted path.
+			_, err := shared.RunCommand("/proc/self/exe", "forkzfs", "--", "rename", d.dataset(vol, false), d.dataset(vol, true))
+			if err != nil {
+				return err
+			}
+		} else {
+			// Locate the origin snapshot (if any).
+			origin, err := d.getDatasetProperty(d.dataset(vol, false), "origin")
+			if err != nil {
+				return err
+			}
+
+			// Delete the dataset (and any snapshots left).
+			_, err = shared.RunCommand("zfs", "destroy", "-r", d.dataset(vol, false))
+			if err != nil {
+				return err
+			}
+
+			// Check if the origin can now be deleted.
+			if origin != "" && origin != "-" {
+				dataset := ""
+				if strings.HasPrefix(origin, filepath.Join(d.config["zfs.pool_name"], "deleted")) {
+					// Strip the snapshot name when dealing with a deleted volume.
+					dataset = strings.SplitN(origin, "@", 2)[0]
+				} else if strings.Contains(origin, "@deleted-") || strings.Contains(origin, "@copy-") {
+					// Handle deleted snapshots.
+					dataset = origin
+				}
+
+				if dataset != "" {
+					// Get all clones.
+					clones, err := d.getClones(dataset)
+					if err != nil {
+						return err
+					}
+
+					if len(clones) == 0 {
+						// Delete the origin.
+						_, err := shared.RunCommand("zfs", "destroy", "-r", dataset)
+						if err != nil {
+							return err
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vol.contentType == ContentTypeFS {
+		// Delete the mountpoint if present.
+		err := os.Remove(vol.MountPath())
+		if err != nil && !os.IsNotExist(err) {
+			return err
+		}
+
+		// Delete the snapshot storage.
+		err = os.RemoveAll(GetVolumeSnapshotDir(d.name, vol.volType, vol.name))
+		if err != nil && !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	// For VMs, also delete the filesystem dataset.
+	if d.checkVMBlock(vol) {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		err := d.DeleteVolume(fsVol, op)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// HasVolume indicates whether a specific volume exists on the storage pool.
+func (d *zfs) HasVolume(vol Volume) bool {
+	// Check if the dataset exists.
+	return d.checkDataset(d.dataset(vol, false))
+}
+
+// ValidateVolume validates the supplied volume config.
+func (d *zfs) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
+	rules := map[string]func(value string) error{
+		"zfs.remove_snapshots": shared.IsBool,
+		"zfs.use_refquota":     shared.IsBool,
+	}
+
+	return d.validateVolume(vol, rules, removeUnknownKeys)
+}
+
+// UpdateVolume applies config changes to the volume.
+func (d *zfs) UpdateVolume(vol Volume, changedConfig map[string]string) error {
+	for k, v := range changedConfig {
+		if k == "size" {
+			return d.SetVolumeQuota(vol, v, nil)
+		}
+
+		if k == "zfs.use_refquota" {
+			// Get current value.
+			cur := vol.ExpandedConfig("zfs.use_refquota")
+
+			// Get current size.
+			size := changedConfig["size"]
+			if size == "" {
+				size = vol.ExpandedConfig("size")
+			}
+
+			// Skip if no current quota.
+			if size == "" {
+				continue
+			}
+
+			// Skip if no change in effective value.
+			if shared.IsTrue(v) == shared.IsTrue(cur) {
+				continue
+			}
+
+			// Set new quota by temporarily modifying the volume config.
+			vol.config["zfs.use_refquota"] = v
+			err := d.SetVolumeQuota(vol, size, nil)
+			vol.config["zfs.use_refquota"] = cur
+			if err != nil {
+				return err
+			}
+
+			// Unset old quota.
+			err = d.SetVolumeQuota(vol, "", nil)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// GetVolumeUsage returns the disk space used by the volume.
+func (d *zfs) GetVolumeUsage(vol Volume) (int64, error) {
+	// Determine what key to use.
+	key := "used"
+	if shared.IsTrue(vol.ExpandedConfig("zfs.use_refquota")) {
+		key = "referenced"
+	}
+
+	// Shortcut for refquota filesystems.
+	if key == "referenced" && vol.contentType == ContentTypeFS && shared.IsMountPoint(vol.MountPath()) {
+		var stat unix.Statfs_t
+		err := unix.Statfs(vol.MountPath(), &stat)
+		if err != nil {
+			return -1, err
+		}
+
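+		// Used space is the total block count minus free blocks, multiplied by the block size.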
+		return int64(stat.Blocks-stat.Bfree) * int64(stat.Bsize), nil
+	}
+
+	// Get the current value.
+	value, err := d.getDatasetProperty(d.dataset(vol, false), key)
+	if err != nil {
+		return -1, err
+	}
+
+	// Convert to int.
+	valueInt, err := strconv.ParseInt(value, 10, 64)
+	if err != nil {
+		return -1, err
+	}
+
+	return valueInt, nil
+}
+
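+// SetVolumeQuota sets the size limit (quota/refquota or volsize) on the volume.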
+func (d *zfs) SetVolumeQuota(vol Volume, size string, op *operations.Operation) error {
+	if size == "" {
+		size = "0"
+	}
+
+	// Convert to bytes.
+	sizeBytes, err := units.ParseByteSizeString(size)
+	if err != nil {
+		return err
+	}
+
+	// Handle volume datasets.
+	if vol.contentType == ContentTypeBlock {
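+		// Round down to a multiple of 8KiB, the default zvol block size.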
+		sizeBytes = (sizeBytes / 8192) * 8192
+
+		err := d.setDatasetProperties(d.dataset(vol, false), fmt.Sprintf("volsize=%d", sizeBytes))
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// Handle filesystem datasets.
+	key := "quota"
+	if shared.IsTrue(vol.ExpandedConfig("zfs.use_refquota")) {
+		key = "refquota"
+	}
+
+	value := fmt.Sprintf("%d", sizeBytes)
+	if sizeBytes == 0 {
+		value = "none"
+	}
+
+	err = d.setDatasetProperties(d.dataset(vol, false), fmt.Sprintf("%s=%s", key, value))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetVolumeDiskPath returns the location of a root disk block device.
+func (d *zfs) GetVolumeDiskPath(vol Volume) (string, error) {
+	// Shortcut for udev.
+	if tryExists(filepath.Join("/dev/zvol", d.dataset(vol, false))) {
+		return filepath.Join("/dev/zvol", d.dataset(vol, false)), nil
+	}
+
+	// Locate zvol_id.
+	zvolid := "/lib/udev/zvol_id"
+	if !shared.PathExists(zvolid) {
+		var err error
+
+		zvolid, err = exec.LookPath("zvol_id")
+		if err != nil {
+			return "", err
+		}
+	}
+
+	// List all the device nodes.
+	entries, err := ioutil.ReadDir("/dev")
+	if err != nil {
+		return "", err
+	}
+
+	for _, entry := range entries {
+		entryName := entry.Name()
+
+		// Ignore non-zvol devices.
+		if !strings.HasPrefix(entryName, "zd") {
+			continue
+		}
+
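+		// Skip partition devices (names containing "p", e.g. "zd16p1").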
+		if strings.Contains(entryName, "p") {
+			continue
+		}
+
+		// Resolve the dataset path.
+		entryPath := filepath.Join("/dev", entryName)
+		output, err := shared.RunCommand(zvolid, entryPath)
+		if err != nil {
+			continue
+		}
+
+		if strings.TrimSpace(output) == d.dataset(vol, false) {
+			return entryPath, nil
+		}
+	}
+
+	return "", fmt.Errorf("Could not locate a zvol for %s", d.dataset(vol, false))
+}
+
+// MountVolume mounts a volume. Returns true if this call performed the mount.
+// Block volumes are activated rather than mounted and always return false.
+func (d *zfs) MountVolume(vol Volume, op *operations.Operation) (bool, error) {
+	// For VMs, also mount the filesystem dataset.
+	if d.checkVMBlock(vol) {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		_, err := d.MountVolume(fsVol, op)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	// For block devices, we make them appear.
+	if vol.contentType == ContentTypeBlock {
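+		// volmode=dev exposes the zvol as a raw block device with partition scanning disabled.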
+		err := d.setDatasetProperties(d.dataset(vol, false), "volmode=dev")
+		if err != nil {
+			return false, err
+		}
+
+		// Wait half a second to give udev a chance to kick in.
+		time.Sleep(500 * time.Millisecond)
+
+		return false, nil
+	}
+
+	// Check if not already mounted.
+	if shared.IsMountPoint(vol.MountPath()) {
+		return false, nil
+	}
+
+	// Mount the dataset.
+	_, err := shared.RunCommand("zfs", "mount", d.dataset(vol, false))
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// UnmountVolume unmounts a volume. Returns true if this call performed the unmount.
+func (d *zfs) UnmountVolume(vol Volume, op *operations.Operation) (bool, error) {
+	// For VMs, also unmount the filesystem dataset.
+	if d.checkVMBlock(vol) {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		_, err := d.UnmountVolume(fsVol, op)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	// For block devices, we make them disappear.
+	if vol.contentType == ContentTypeBlock {
+		err := d.setDatasetProperties(d.dataset(vol, false), "volmode=none")
+		if err != nil {
+			return false, err
+		}
+
+		return false, nil
+	}
+
+	// Check if still mounted.
+	if !shared.IsMountPoint(vol.MountPath()) {
+		return false, nil
+	}
+
+	// Unmount the dataset.
+	_, err := shared.RunCommand("zfs", "unmount", d.dataset(vol, false))
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// RenameVolume renames a volume and its snapshots.
+func (d *zfs) RenameVolume(vol Volume, newVolName string, op *operations.Operation) error {
+	newVol := NewVolume(d, d.name, vol.volType, vol.contentType, newVolName, vol.config, vol.poolConfig)
+
+	// Revert handling.
+	revert := revert.New()
+	defer revert.Fail()
+
+	// First rename the VFS paths.
+	err := d.vfsRenameVolume(vol, newVolName, op)
+	if err != nil {
+		return err
+	}
+
+	revert.Add(func() {
+		d.vfsRenameVolume(newVol, vol.name, op)
+	})
+
+	// Rename the ZFS datasets.
+	_, err = shared.RunCommand("zfs", "rename", d.dataset(vol, false), d.dataset(newVol, false))
+	if err != nil {
+		return err
+	}
+
+	revert.Add(func() {
+		shared.RunCommand("zfs", "rename", d.dataset(newVol, false), d.dataset(vol, false))
+	})
+
+	// Update the mountpoints.
+	err = d.setDatasetProperties(d.dataset(newVol, false), fmt.Sprintf("mountpoint=%s", newVol.MountPath()))
+	if err != nil {
+		return err
+	}
+
+	// For VMs, also rename the filesystem dataset.
+	if d.checkVMBlock(vol) {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		err := d.RenameVolume(fsVol, newVolName, op)
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() {
+			newFsVol := NewVolume(d, d.name, newVol.volType, ContentTypeFS, newVol.name, newVol.config, newVol.poolConfig)
+			d.RenameVolume(newFsVol, vol.name, op)
+		})
+	}
+
+	// All done.
+	revert.Success()
+
+	return nil
+}
+
+// MigrateVolume sends a volume for migration.
+func (d *zfs) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
+	if vol.contentType != ContentTypeFS {
+		return ErrNotSupported
+	}
+
+	// Handle simple rsync through generic.
+	if volSrcArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC {
+		return d.vfsMigrateVolume(vol, conn, volSrcArgs, op)
+	} else if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_ZFS {
+		return ErrNotSupported
+	}
+
+	// Handle zfs send/receive migration.
+	var finalParent string
+	if !volSrcArgs.FinalSync {
+		// Transfer the snapshots first.
+		for i, snapName := range volSrcArgs.Snapshots {
+			snapshot, _ := vol.NewSnapshot(snapName)
+
+			// Figure out parent and current datasets.
+			parent := ""
+			if i > 0 {
+				oldSnapshot, _ := vol.NewSnapshot(volSrcArgs.Snapshots[i-1])
+				parent = d.dataset(oldSnapshot, false)
+			}
+
+			// Setup progress tracking.
+			var wrapper *ioprogress.ProgressTracker
+			if volSrcArgs.TrackProgress {
+				wrapper = migration.ProgressTracker(op, "fs_progress", snapshot.name)
+			}
+
+			// Send snapshot to recipient (ensure local snapshot volume is mounted if needed).
+			err := d.sendDataset(d.dataset(snapshot, false), parent, volSrcArgs, conn, wrapper)
+			if err != nil {
+				return err
+			}
+
+			finalParent = d.dataset(snapshot, false)
+		}
+	}
+
+	// Setup progress tracking.
+	var wrapper *ioprogress.ProgressTracker
+	if volSrcArgs.TrackProgress {
+		wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
+	}
+
+	srcSnapshot := d.dataset(vol, false)
+	if !vol.IsSnapshot() {
+		// Create a temporary read-only snapshot.
+		srcSnapshot = fmt.Sprintf("%s at migration-%s", d.dataset(vol, false), uuid.NewRandom().String())
+		_, err := shared.RunCommand("zfs", "snapshot", srcSnapshot)
+		if err != nil {
+			return err
+		}
+
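+		// With multi-sync, the first pass keeps its snapshot as the incremental parent for the final sync, after which both snapshots are destroyed.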
+		if volSrcArgs.MultiSync {
+			if volSrcArgs.FinalSync {
+				finalParent = volSrcArgs.Data.(string)
+				defer shared.RunCommand("zfs", "destroy", finalParent)
+				defer shared.RunCommand("zfs", "destroy", srcSnapshot)
+			} else {
+				volSrcArgs.Data = srcSnapshot
+			}
+		} else {
+			defer shared.RunCommand("zfs", "destroy", srcSnapshot)
+		}
+	}
+
+	// Send the volume itself.
+	err := d.sendDataset(srcSnapshot, finalParent, volSrcArgs, conn, wrapper)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// BackupVolume creates an exported version of a volume.
+func (d *zfs) BackupVolume(vol Volume, targetPath string, optimized bool, snapshots bool, op *operations.Operation) error {
+	// Handle the non-optimized tarballs through the generic packer.
+	if !optimized {
+		return d.vfsBackupVolume(vol, targetPath, snapshots, op)
+	}
+
+	// Handle the optimized tarballs.
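+	// Optimized backups store raw "zfs send" streams (one per snapshot, plus the volume itself) instead of a plain file tree.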
+	sendToFile := func(path string, parent string, file string) error {
+		// Prepare zfs send arguments.
+		args := []string{"send"}
+		if parent != "" {
+			args = append(args, "-i", parent)
+		}
+		args = append(args, path)
+
+		// Create the file.
+		fd, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644)
+		if err != nil {
+			return err
+		}
+		defer fd.Close()
+
+		// Write the dataset stream to the file.
+		err = shared.RunCommandWithFds(nil, fd, "zfs", args...)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// Handle snapshots.
+	finalParent := ""
+	if snapshots {
+		snapshotsPath := fmt.Sprintf("%s/snapshots", targetPath)
+
+		// Retrieve the snapshots.
+		volSnapshots, err := d.VolumeSnapshots(vol, op)
+		if err != nil {
+			return err
+		}
+
+		// Create the snapshot path.
+		if len(volSnapshots) > 0 {
+			err = os.MkdirAll(snapshotsPath, 0711)
+			if err != nil {
+				return err
+			}
+		}
+
+		for i, snapName := range volSnapshots {
+			snapshot, _ := vol.NewSnapshot(snapName)
+
+			// Figure out parent and current datasets.
+			parent := ""
+			if i > 0 {
+				oldSnapshot, _ := vol.NewSnapshot(volSnapshots[i-1])
+				parent = d.dataset(oldSnapshot, false)
+			}
+
+			// Make a binary zfs backup.
+			target := fmt.Sprintf("%s/%s.bin", snapshotsPath, snapName)
+
+			err := sendToFile(d.dataset(snapshot, false), parent, target)
+			if err != nil {
+				return err
+			}
+
+			finalParent = d.dataset(snapshot, false)
+		}
+	}
+
+	// Create a temporary read-only snapshot.
+	srcSnapshot := fmt.Sprintf("%s at backup-%s", d.dataset(vol, false), uuid.NewRandom().String())
+	_, err := shared.RunCommand("zfs", "snapshot", srcSnapshot)
+	if err != nil {
+		return err
+	}
+	defer shared.RunCommand("zfs", "destroy", srcSnapshot)
+
+	// Dump the container to a file.
+	fsDump := fmt.Sprintf("%s/container.bin", targetPath)
+	err = sendToFile(srcSnapshot, finalParent, fsDump)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// CreateVolumeSnapshot creates a snapshot of a volume.
+func (d *zfs) CreateVolumeSnapshot(vol Volume, op *operations.Operation) error {
+	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(vol.name)
+
+	// Revert handling.
+	revert := revert.New()
+	defer revert.Fail()
+
+	// Create the parent directory.
+	err := createParentSnapshotDirIfMissing(d.name, vol.volType, parentName)
+	if err != nil {
+		return err
+	}
+
+	// Create snapshot directory.
+	err = vol.EnsureMountPath()
+	if err != nil {
+		return err
+	}
+
+	// Make the snapshot.
+	_, err = shared.RunCommand("zfs", "snapshot", d.dataset(vol, false))
+	if err != nil {
+		return err
+	}
+
+	revert.Add(func() { d.DeleteVolumeSnapshot(vol, op) })
+
+	// For VMs, also snapshot the filesystem dataset.
+	if d.checkVMBlock(vol) {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		err := d.CreateVolumeSnapshot(fsVol, op)
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() { d.DeleteVolumeSnapshot(fsVol, op) })
+	}
+
+	// All done.
+	revert.Success()
+
+	return nil
+}
+
+// DeleteVolumeSnapshot removes a snapshot from the storage device.
+func (d *zfs) DeleteVolumeSnapshot(vol Volume, op *operations.Operation) error {
+	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(vol.name)
+
+	// Handle clones.
+	clones, err := d.getClones(d.dataset(vol, false))
+	if err != nil {
+		return err
+	}
+
+	if len(clones) > 0 {
+		// Move to the deleted path.
+		_, err := shared.RunCommand("zfs", "rename", d.dataset(vol, false), d.dataset(vol, true))
+		if err != nil {
+			return err
+		}
+	} else {
+		// Delete the snapshot.
+		_, err := shared.RunCommand("zfs", "destroy", d.dataset(vol, false))
+		if err != nil {
+			return err
+		}
+	}
+
+	// Delete the mountpoint.
+	err = os.Remove(vol.MountPath())
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	// Remove the parent snapshot directory if this is the last snapshot being removed.
+	err = deleteParentSnapshotDirIfEmpty(d.name, vol.volType, parentName)
+	if err != nil {
+		return err
+	}
+
+	// For VMs, also delete the filesystem dataset snapshot.
+	if d.checkVMBlock(vol) {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		err := d.DeleteVolumeSnapshot(fsVol, op)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MountVolumeSnapshot mounts a volume snapshot. Returns true if this call performed the mount.
+func (d *zfs) MountVolumeSnapshot(vol Volume, op *operations.Operation) (bool, error) {
+	// Ignore block devices for now.
+	if vol.contentType == ContentTypeBlock {
+		return false, ErrNotSupported
+	}
+
+	// Check if already mounted.
+	if shared.IsMountPoint(vol.MountPath()) {
+		return false, nil
+	}
+
+	// Mount the snapshot directly (the zfs tools cannot mount snapshots).
+	err := TryMount(d.dataset(vol, false), vol.MountPath(), "zfs", 0, "")
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// UnmountVolumeSnapshot unmounts a volume snapshot. Returns true if this call performed the unmount.
+func (d *zfs) UnmountVolumeSnapshot(vol Volume, op *operations.Operation) (bool, error) {
+	// Ignore block devices for now.
+	if vol.contentType == ContentTypeBlock {
+		return false, ErrNotSupported
+	}
+
+	return forceUnmount(vol.MountPath())
+}
+
+// VolumeSnapshots returns a list of snapshots for the volume.
+func (d *zfs) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) {
+	// Get all children datasets.
+	entries, err := d.getDatasets(d.dataset(vol, false))
+	if err != nil {
+		return nil, err
+	}
+
+	// Filter only the snapshots.
+	snapshots := []string{}
+	for _, entry := range entries {
+		if strings.HasPrefix(entry, "@snapshot-") {
+			snapshots = append(snapshots, strings.TrimPrefix(entry, "@snapshot-"))
+		}
+	}
+
+	return snapshots, nil
+}
+
+// RestoreVolume restores a volume from a snapshot.
+func (d *zfs) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
+	snapVol := NewVolume(d, d.name, vol.volType, vol.contentType, fmt.Sprintf("%s/%s", vol.name, snapshotName), vol.config, vol.poolConfig)
+
+	// Get the list of snapshots.
+	entries, err := d.getDatasets(d.dataset(vol, false))
+	if err != nil {
+		return err
+	}
+
+	// Check if more recent snapshots exist.
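+	// (Assumes getDatasets returns snapshots in creation order.)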
+	idx := -1
+	snapshots := []string{}
+	for i, entry := range entries {
+		if entry == fmt.Sprintf("@snapshot-%s", snapshotName) {
+			// Located the current snapshot.
+			idx = i
+			continue
+		} else if idx < 0 {
+			// Skip any previous snapshot.
+			continue
+		}
+
+		if strings.HasPrefix(entry, "@snapshot-") {
+			// Located a normal snapshot following ours.
+			snapshots = append(snapshots, strings.TrimPrefix(entry, "@snapshot-"))
+			continue
+		}
+
+		if strings.HasPrefix(entry, "@") {
+			// Located an internal snapshot.
+			return fmt.Errorf("Snapshot '%s' cannot be restored due to subsequent internal snapshot(s) (from a copy)", snapshotName)
+		}
+	}
+
+	// Check if snapshot removal is allowed.
+	if len(snapshots) > 0 {
+		if !shared.IsTrue(vol.ExpandedConfig("zfs.remove_snapshots")) {
+			return fmt.Errorf("Snapshot '%s' cannot be restored due to subsequent snapshot(s). Set zfs.remove_snapshots to override", snapshotName)
+		}
+
+		// Setup custom error to tell the backend what to delete.
+		err := ErrDeleteSnapshots{}
+		err.Snapshots = snapshots
+		return err
+	}
+
+	// Restore the snapshot.
+	_, err = shared.RunCommand("zfs", "rollback", d.dataset(snapVol, false))
+	if err != nil {
+		return err
+	}
+
+	// For VMs, also restore the associated filesystem dataset.
+	if d.checkVMBlock(vol) {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		err := d.RestoreVolume(fsVol, snapshotName, op)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RenameVolumeSnapshot renames a volume snapshot.
+func (d *zfs) RenameVolumeSnapshot(vol Volume, newSnapshotName string, op *operations.Operation) error {
+	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(vol.name)
+	newVol := NewVolume(d, d.name, vol.volType, vol.contentType, fmt.Sprintf("%s/%s", parentName, newSnapshotName), vol.config, vol.poolConfig)
+
+	// Revert handling.
+	revert := revert.New()
+	defer revert.Fail()
+
+	// First rename the VFS paths.
+	err := d.vfsRenameVolumeSnapshot(vol, newSnapshotName, op)
+	if err != nil {
+		return err
+	}
+
+	revert.Add(func() {
+		d.vfsRenameVolumeSnapshot(newVol, vol.name, op)
+	})
+
+	// Rename the ZFS datasets.
+	_, err = shared.RunCommand("zfs", "rename", d.dataset(vol, false), d.dataset(newVol, false))
+	if err != nil {
+		return err
+	}
+
+	revert.Add(func() {
+		shared.RunCommand("zfs", "rename", d.dataset(newVol, false), d.dataset(vol, false))
+	})
+
+	// For VMs, also rename the filesystem dataset snapshot.
+	if d.checkVMBlock(vol) {
+		fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
+		err := d.RenameVolumeSnapshot(fsVol, newSnapshotName, op)
+		if err != nil {
+			return err
+		}
+
+		revert.Add(func() {
+			newFsVol := NewVolume(d, d.name, newVol.volType, ContentTypeFS, newVol.name, newVol.config, newVol.poolConfig)
+			d.RenameVolumeSnapshot(newFsVol, vol.name, op)
+		})
+	}
+
+	// All done.
+	revert.Success()
+
+	return nil
+}
diff --git a/lxd/storage/drivers/load.go b/lxd/storage/drivers/load.go
index c9374157af..dcb8086659 100644
--- a/lxd/storage/drivers/load.go
+++ b/lxd/storage/drivers/load.go
@@ -9,6 +9,7 @@ var drivers = map[string]func() driver{
 	"dir":    func() driver { return &dir{} },
 	"cephfs": func() driver { return &cephfs{} },
 	"btrfs":  func() driver { return &btrfs{} },
+	"zfs":    func() driver { return &zfs{} },
 }
 
 // Load returns a Driver for an existing low-level storage pool.

From bd409abc5710c918aa525040a234e4aeed3bbb1e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgraber at ubuntu.com>
Date: Thu, 9 Jan 2020 00:20:42 -0500
Subject: [PATCH 7/7] tests: Add zfs to list of new drivers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 test/includes/storage.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/includes/storage.sh b/test/includes/storage.sh
index c41901f01a..9672983781 100644
--- a/test/includes/storage.sh
+++ b/test/includes/storage.sh
@@ -130,8 +130,8 @@ umount_loops() {
 }
 
 storage_compatible() {
-    if [ "${1}" = "cephfs" ] || [ "${1}" = "dir" ] || [ "${1}" = "btrfs" ]; then
-        if [ "${2}" = "cephfs" ] || [ "${2}" = "dir" ] || [ "${2}" = "btrfs" ]; then
+    if [ "${1}" = "cephfs" ] || [ "${1}" = "dir" ] || [ "${1}" = "btrfs" ] || [ "${1}" = "zfs" ]; then
+        if [ "${2}" = "cephfs" ] || [ "${2}" = "dir" ] || [ "${2}" = "btrfs" ] || [ "${1}" = "zfs" ]; then
             true
             return
         else

