[lxc-devel] [lxd/master] Fix storage volume migration

monstermunchkin on Github lxc-bot at linuxcontainers.org
Thu Apr 11 16:20:05 UTC 2019


A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 318 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20190411/33ce1920/attachment.bin>
-------------- next part --------------
From e0786d243c73f5d2423584b7f0a707f2b6002dc3 Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Thu, 11 Apr 2019 18:16:23 +0200
Subject: [PATCH 1/6] shared: Add storage_api_remote_volume_snapshots extension

Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
 shared/version/api.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/shared/version/api.go b/shared/version/api.go
index 1b398634be..cae5a863c9 100644
--- a/shared/version/api.go
+++ b/shared/version/api.go
@@ -146,6 +146,7 @@ var APIExtensions = []string{
 	"kernel_features",
 	"id_map_current",
 	"event_location",
+	"storage_api_remote_volume_snapshots",
 }
 
 // APIExtensionsCount returns the number of available API extensions.

From 23e04ecaa858ee7a7f328337c5a0d3c7f66ed605 Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Thu, 11 Apr 2019 18:16:05 +0200
Subject: [PATCH 2/6] doc: Add storage_api_remote_volume_snapshots

Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
 doc/api-extensions.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 03fea2c807..89dae5a56a 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -725,3 +725,6 @@ changed but the kernel map is (e.g. shiftfs).
 
 ## event\_location
 Expose the location of the generation of API events.
+
+## storage\_api\_remote\_volume\_snapshots
+This allows migrating storage volumes including their snapshots.
\ No newline at end of file

From 1cf0674d57a4482a507eb7fd2bba17408cba5006 Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Thu, 11 Apr 2019 09:29:53 +0200
Subject: [PATCH 3/6] shared/api: Extend StorageVolumePost

Add a VolumeOnly flag, which is used when migrating storage volumes.

Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
 shared/api/storage_pool_volume.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/shared/api/storage_pool_volume.go b/shared/api/storage_pool_volume.go
index 51954e10ed..db916ff0fa 100644
--- a/shared/api/storage_pool_volume.go
+++ b/shared/api/storage_pool_volume.go
@@ -27,6 +27,9 @@ type StorageVolumePost struct {
 
 	// API extension: storage_api_remote_volume_handling
 	Target *StorageVolumePostTarget `json:"target" yaml:"target"`
+
+	// API extension: storage_api_remote_volume_snapshots
+	VolumeOnly bool `json:"volume_only" yaml:"volume_only"`
 }
 
 // StorageVolumePostTarget represents the migration target host and operation

From 8a8dde01b189ee342f2285feef23e3c9d7f85baa Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Thu, 11 Apr 2019 09:33:08 +0200
Subject: [PATCH 4/6] lxd: Fix volume migration with snapshots

This fixes storage volume migration so that volume snapshots are no
longer ignored.

Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
 lxd/migrate.go                 |   9 +-
 lxd/migrate_storage_volumes.go | 184 ++++++++++++++++++++++++++++-----
 lxd/storage_btrfs.go           |   2 +-
 lxd/storage_ceph.go            |   6 --
 lxd/storage_ceph_migration.go  |   6 ++
 lxd/storage_migration.go       |  70 ++++++++++++-
 lxd/storage_volumes.go         |   9 +-
 lxd/storage_volumes_utils.go   |  63 ++++++-----
 lxd/storage_zfs.go             |   2 +-
 9 files changed, 282 insertions(+), 69 deletions(-)

diff --git a/lxd/migrate.go b/lxd/migrate.go
index ee20aab7f9..744810220f 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -41,7 +41,8 @@ type migrationFields struct {
 	container     container
 
 	// storage specific fields
-	storage storage
+	storage    storage
+	volumeOnly bool
 }
 
 func (c *migrationFields) send(m proto.Message) error {
@@ -269,7 +270,8 @@ type MigrationSinkArgs struct {
 	Snapshots     []*migration.Snapshot
 
 	// Storage specific fields
-	Storage storage
+	Storage    storage
+	VolumeOnly bool
 
 	// Transport specific fields
 	RsyncFeatures []string
@@ -283,6 +285,9 @@ type MigrationSourceArgs struct {
 	// Transport specific fields
 	RsyncFeatures []string
 	ZfsFeatures   []string
+
+	// Volume specific fields
+	VolumeOnly bool
 }
 
 func (c *migrationSink) connectWithSecret(secret string) (*websocket.Conn, error) {
diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index 96e26c429c..bb8748e420 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -3,6 +3,7 @@ package main
 import (
 	"fmt"
 
+	"github.com/golang/protobuf/proto"
 	"github.com/gorilla/websocket"
 
 	"github.com/lxc/lxd/lxd/migration"
@@ -11,8 +12,9 @@ import (
 	"github.com/lxc/lxd/shared/logger"
 )
 
-func NewStorageMigrationSource(storage storage) (*migrationSourceWs, error) {
+func NewStorageMigrationSource(storage storage, volumeOnly bool) (*migrationSourceWs, error) {
 	ret := migrationSourceWs{migrationFields{storage: storage}, make(chan bool, 1)}
+	ret.volumeOnly = volumeOnly
 
 	var err error
 	ret.controlSecret, err = shared.RandomCryptoString()
@@ -44,12 +46,43 @@ func (s *migrationSourceWs) DoStorage(migrateOp *operation) error {
 		defer s.storage.StoragePoolVolumeUmount()
 	}
 
+	snapshots := []*migration.Snapshot{}
+	snapshotNames := []string{}
+
+	// Only send snapshots when requested.
+	if !s.volumeOnly {
+		state := s.storage.GetState()
+		pool := s.storage.GetStoragePool()
+		volume := s.storage.GetStoragePoolVolume()
+
+		var err error
+
+		snaps, err := storagePoolVolumeSnapshotsGet(state, pool.Name, volume.Name, storagePoolVolumeTypeCustom)
+		if err == nil {
+			poolID, err := state.Cluster.StoragePoolGetID(pool.Name)
+			if err == nil {
+				for _, name := range snaps {
+					_, snapVolume, err := state.Cluster.StoragePoolNodeVolumeGetType(name, storagePoolVolumeTypeCustom, poolID)
+					if err != nil {
+						continue
+					}
+
+					snapshots = append(snapshots, volumeSnapshotToProtobuf(snapVolume))
+					snapshotNames = append(snapshotNames, shared.ExtractSnapshotName(name))
+				}
+			}
+
+		}
+	}
+
 	// The protocol says we have to send a header no matter what, so let's
 	// do that, but then immediately send an error.
 	myType := s.storage.MigrationType()
 	hasFeature := true
 	header := migration.MigrationHeader{
-		Fs: &myType,
+		Fs:            &myType,
+		SnapshotNames: snapshotNames,
+		Snapshots:     snapshots,
 		RsyncFeatures: &migration.RsyncFeatures{
 			Xattrs:        &hasFeature,
 			Delete:        &hasFeature,
@@ -93,6 +126,7 @@ func (s *migrationSourceWs) DoStorage(migrateOp *operation) error {
 	sourceArgs := MigrationSourceArgs{
 		RsyncFeatures: rsyncFeatures,
 		ZfsFeatures:   zfsFeatures,
+		VolumeOnly:    s.volumeOnly,
 	}
 
 	driver, fsErr := s.storage.StorageMigrationSource(sourceArgs)
@@ -122,7 +156,7 @@ func (s *migrationSourceWs) DoStorage(migrateOp *operation) error {
 		return err
 	}
 
-	err = driver.SendStorageVolume(s.fsConn, migrateOp, bwlimit, s.storage)
+	err = driver.SendStorageVolume(s.fsConn, migrateOp, bwlimit, s.storage, s.volumeOnly)
 	if err != nil {
 		logger.Errorf("Failed to send storage volume")
 		return abort(err)
@@ -147,8 +181,8 @@ func (s *migrationSourceWs) DoStorage(migrateOp *operation) error {
 
 func NewStorageMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) {
 	sink := migrationSink{
-		src:    migrationFields{storage: args.Storage},
-		dest:   migrationFields{storage: args.Storage},
+		src:    migrationFields{storage: args.Storage, volumeOnly: args.VolumeOnly},
+		dest:   migrationFields{storage: args.Storage, volumeOnly: args.VolumeOnly},
 		url:    args.Url,
 		dialer: args.Dialer,
 		push:   args.Push,
@@ -245,7 +279,9 @@ func (c *migrationSink) DoStorage(migrateOp *operation) error {
 	myType := c.src.storage.MigrationType()
 	hasFeature := true
 	resp := migration.MigrationHeader{
-		Fs: &myType,
+		Fs:            &myType,
+		Snapshots:     header.Snapshots,
+		SnapshotNames: header.SnapshotNames,
 		RsyncFeatures: &migration.RsyncFeatures{
 			Xattrs:        &hasFeature,
 			Delete:        &hasFeature,
@@ -271,11 +307,6 @@ func (c *migrationSink) DoStorage(migrateOp *operation) error {
 	// Handle rsync options
 	rsyncFeatures := header.GetRsyncFeaturesSlice()
 
-	args := MigrationSinkArgs{
-		Storage:       c.dest.storage,
-		RsyncFeatures: rsyncFeatures,
-	}
-
 	err = sender(&resp)
 	if err != nil {
 		logger.Errorf("Failed to send storage volume migration header")
@@ -283,26 +314,131 @@ func (c *migrationSink) DoStorage(migrateOp *operation) error {
 		return err
 	}
 
-	var fsConn *websocket.Conn
+	restore := make(chan error)
+
+	go func(c *migrationSink) {
+		/* We do the fs receive in parallel so we don't have to reason
+		 * about when to receive what. The sending side is smart enough
+		 * to send the filesystem bits that it can before it seizes the
+		 * container to start checkpointing, so the total transfer time
+		 * will be minimized even if we're dumb here.
+		 */
+		fsTransfer := make(chan error)
+
+		go func() {
+			var fsConn *websocket.Conn
+			if c.push {
+				fsConn = c.dest.fsConn
+			} else {
+				fsConn = c.src.fsConn
+			}
+
+			args := MigrationSinkArgs{
+				Storage:       c.dest.storage,
+				RsyncFeatures: rsyncFeatures,
+				Snapshots:     header.Snapshots,
+				VolumeOnly:    c.src.volumeOnly,
+			}
+
+			err = mySink(fsConn, migrateOp, args)
+			if err != nil {
+				fsTransfer <- err
+				return
+			}
+
+			fsTransfer <- nil
+		}()
+
+		err := <-fsTransfer
+		if err != nil {
+			restore <- err
+			return
+		}
+
+		restore <- nil
+	}(c)
+
+	var source <-chan migration.MigrationControl
 	if c.push {
-		fsConn = c.dest.fsConn
+		source = c.dest.controlChannel()
 	} else {
-		fsConn = c.src.fsConn
-	}
-
-	err = mySink(fsConn, migrateOp, args)
-	if err != nil {
-		logger.Errorf("Failed to start storage volume migration sink")
-		controller(err)
-		return err
+		source = c.src.controlChannel()
+	}
+
+	for {
+		select {
+		case err = <-restore:
+			if err != nil {
+				disconnector()
+				return err
+			}
+
+			controller(nil)
+			logger.Debugf("Migration sink finished receiving storage volume")
+			return nil
+		case msg, ok := <-source:
+			if !ok {
+				disconnector()
+				return fmt.Errorf("Got error reading source")
+			}
+
+			if !*msg.Success {
+				disconnector()
+				return fmt.Errorf(*msg.Message)
+			} else {
+				// The source can only tell us it failed (e.g. if
+				// checkpointing failed). We have to tell the source
+				// whether or not the restore was successful.
+				logger.Debugf("Unknown message %v from source", msg)
+			}
+		}
 	}
 
-	controller(nil)
-	logger.Debugf("Migration sink finished receiving storage volume")
-	return nil
+	/*
+			var fsConn *websocket.Conn
+			if c.push {
+				fsConn = c.dest.fsConn
+			} else {
+				fsConn = c.src.fsConn
+			}
+
+			err = mySink(fsConn, migrateOp, args)
+			if err != nil {
+				logger.Errorf("Failed to start storage volume migration sink")
+				controller(err)
+				return err
+			}
+
+			controller(nil)
+		logger.Debugf("Migration sink finished receiving storage volume")
+		return nil
+	*/
 }
 
 func (s *migrationSourceWs) ConnectStorageTarget(target api.StorageVolumePostTarget) error {
 	logger.Debugf("Storage migration source is connecting")
 	return s.ConnectTarget(target.Certificate, target.Operation, target.Websockets)
 }
+
+func volumeSnapshotToProtobuf(vol *api.StorageVolume) *migration.Snapshot {
+	config := []*migration.Config{}
+	for k, v := range vol.Config {
+		kCopy := string(k)
+		vCopy := string(v)
+		config = append(config, &migration.Config{Key: &kCopy, Value: &vCopy})
+	}
+
+	snapOnlyName := shared.ExtractSnapshotName(vol.Name)
+
+	return &migration.Snapshot{
+		Name:         &snapOnlyName,
+		LocalConfig:  config,
+		Profiles:     []string{},
+		Ephemeral:    proto.Bool(false),
+		LocalDevices: []*migration.Device{},
+		Architecture: proto.Int32(0),
+		Stateful:     proto.Bool(false),
+		CreationDate: proto.Int64(0),
+		LastUsedDate: proto.Int64(0),
+	}
+}
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 40be14a13e..48ced1954b 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -3084,7 +3084,7 @@ func (s *storageBtrfs) doCrossPoolVolumeCopy(sourcePool string, sourceName strin
 	return nil
 }
 
-func (s *btrfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage) error {
+func (s *btrfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error {
 	msg := fmt.Sprintf("Function not implemented")
 	logger.Errorf(msg)
 	return fmt.Errorf(msg)
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 23ce78067a..313c1f4c66 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -2750,12 +2750,6 @@ func (s *storageCeph) StoragePoolVolumeCopy(source *api.StorageVolumeSource) err
 	return nil
 }
 
-func (s *rbdMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage) error {
-	msg := fmt.Sprintf("Function not implemented")
-	logger.Errorf(msg)
-	return fmt.Errorf(msg)
-}
-
 func (s *storageCeph) StorageMigrationSource(args MigrationSourceArgs) (MigrationStorageSourceDriver, error) {
 	return rsyncStorageMigrationSource(args)
 }
diff --git a/lxd/storage_ceph_migration.go b/lxd/storage_ceph_migration.go
index 5f3e441888..1411f76e19 100644
--- a/lxd/storage_ceph_migration.go
+++ b/lxd/storage_ceph_migration.go
@@ -150,6 +150,12 @@ func (s *rbdMigrationSourceDriver) SendWhileRunning(conn *websocket.Conn,
 	return nil
 }
 
+func (s *rbdMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error {
+	msg := fmt.Sprintf("Function not implemented")
+	logger.Errorf(msg)
+	return fmt.Errorf(msg)
+}
+
 func (s *storageCeph) MigrationType() migration.MigrationFSType {
 	return migration.MigrationFSType_RBD
 }
diff --git a/lxd/storage_migration.go b/lxd/storage_migration.go
index 81e6c6f014..387f2bef6d 100644
--- a/lxd/storage_migration.go
+++ b/lxd/storage_migration.go
@@ -10,6 +10,7 @@ import (
 	"github.com/lxc/lxd/lxd/migration"
 	"github.com/lxc/lxd/lxd/types"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
 )
 
@@ -36,7 +37,7 @@ type MigrationStorageSourceDriver interface {
 	 */
 	Cleanup()
 
-	SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage) error
+	SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error
 }
 
 type rsyncStorageSourceDriver struct {
@@ -49,7 +50,7 @@ func (s rsyncStorageSourceDriver) Snapshots() []container {
 	return s.snapshots
 }
 
-func (s rsyncStorageSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage) error {
+func (s rsyncStorageSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error {
 	ourMount, err := storage.StoragePoolVolumeMount()
 	if err != nil {
 		return err
@@ -58,15 +59,39 @@ func (s rsyncStorageSourceDriver) SendStorageVolume(conn *websocket.Conn, op *op
 		defer storage.StoragePoolVolumeUmount()
 	}
 
+	state := storage.GetState()
 	pool := storage.GetStoragePool()
 	volume := storage.GetStoragePoolVolume()
 
+	if !volumeOnly {
+		snapshots, err := storagePoolVolumeSnapshotsGet(state, pool.Name, volume.Name, storagePoolVolumeTypeCustom)
+		if err != nil {
+			return err
+		}
+
+		for _, snap := range snapshots {
+			wrapper := StorageProgressReader(op, "fs_progress", snap)
+			path := getStoragePoolVolumeSnapshotMountPoint(pool.Name, snap)
+			path = shared.AddSlash(path)
+			logger.Debugf("Starting to send storage volume snapshot %s on storage pool %s from %s", snap, pool.Name, path)
+
+			err = RsyncSend(volume.Name, path, conn, wrapper, s.rsyncFeatures, bwlimit, state.OS.ExecPath)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
 	wrapper := StorageProgressReader(op, "fs_progress", volume.Name)
-	state := storage.GetState()
 	path := getStoragePoolVolumeMountPoint(pool.Name, volume.Name)
 	path = shared.AddSlash(path)
 	logger.Debugf("Starting to send storage volume %s on storage pool %s from %s", volume.Name, pool.Name, path)
-	return RsyncSend(volume.Name, path, conn, wrapper, s.rsyncFeatures, bwlimit, state.OS.ExecPath)
+	err = RsyncSend(volume.Name, path, conn, wrapper, s.rsyncFeatures, bwlimit, state.OS.ExecPath)
+	if err != nil {
+		return err
+	}
+
+	return nil
 }
 
 func (s rsyncStorageSourceDriver) SendWhileRunning(conn *websocket.Conn, op *operation, bwlimit string, containerOnly bool) error {
@@ -215,6 +240,43 @@ func rsyncStorageMigrationSink(conn *websocket.Conn, op *operation, args Migrati
 	pool := args.Storage.GetStoragePool()
 	volume := args.Storage.GetStoragePoolVolume()
 
+	if !args.VolumeOnly {
+		for _, snap := range args.Snapshots {
+			target := api.StorageVolumeSnapshotsPost{
+				Name: fmt.Sprintf("%s/%s", volume.Name, *snap.Name),
+			}
+
+			dbArgs := &db.StorageVolumeArgs{
+				Name:        fmt.Sprintf("%s/%s", volume.Name, *snap.Name),
+				PoolName:    pool.Name,
+				TypeName:    volume.Type,
+				Snapshot:    true,
+				Config:      volume.Config,
+				Description: volume.Description,
+			}
+
+			_, err = storagePoolVolumeSnapshotDBCreateInternal(args.Storage.GetState(), dbArgs)
+			if err != nil {
+				return err
+			}
+
+			wrapper := StorageProgressWriter(op, "fs_progress", target.Name)
+			path := getStoragePoolVolumeMountPoint(pool.Name, volume.Name)
+			path = shared.AddSlash(path)
+			logger.Debugf("Starting to receive storage volume snapshot %s on storage pool %s into %s", target.Name, pool.Name, path)
+
+			err = RsyncRecv(path, conn, wrapper, args.RsyncFeatures)
+			if err != nil {
+				return err
+			}
+
+			err = args.Storage.StoragePoolVolumeSnapshotCreate(&target)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
 	wrapper := StorageProgressWriter(op, "fs_progress", volume.Name)
 	path := getStoragePoolVolumeMountPoint(pool.Name, volume.Name)
 	path = shared.AddSlash(path)
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index a5d7638a06..3ed6115162 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -390,9 +390,10 @@ func doVolumeMigration(d *Daemon, poolName string, req *api.StorageVolumesPost)
 		Dialer: websocket.Dialer{
 			TLSClientConfig: config,
 			NetDial:         shared.RFC3493Dialer},
-		Secrets: req.Source.Websockets,
-		Push:    push,
-		Storage: storage,
+		Secrets:    req.Source.Websockets,
+		Push:       push,
+		Storage:    storage,
+		VolumeOnly: req.Source.VolumeOnly,
 	}
 
 	sink, err := NewStorageMigrationSink(&migrationArgs)
@@ -521,7 +522,7 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string
 
 	// This is a migration request so send back requested secrets
 	if req.Migration {
-		ws, err := NewStorageMigrationSource(s)
+		ws, err := NewStorageMigrationSource(s, req.VolumeOnly)
 		if err != nil {
 			return InternalError(err)
 		}
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index 1ffcefef17..605aac003b 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -622,39 +622,13 @@ func storagePoolVolumeCreateInternal(state *state.State, poolName string, vol *a
 		err = s.StoragePoolVolumeCreate()
 	} else {
 		if !vol.Source.VolumeOnly {
-			sourcePoolID, err := state.Cluster.StoragePoolGetID(vol.Source.Pool)
-			if err != nil {
-				return err
-			}
-
 			snapshots, err := storagePoolVolumeSnapshotsGet(state, vol.Source.Pool, vol.Source.Name, volumeType)
 			if err != nil {
 				return err
 			}
 
 			for _, snap := range snapshots {
-				_, snapOnlyName, _ := containerGetParentAndSnapshotName(snap)
-
-				volumeID, err := state.Cluster.StoragePoolNodeVolumeGetTypeID(snap, volumeType, sourcePoolID)
-				if err != nil {
-					return err
-				}
-
-				volumeDescription, err := state.Cluster.StorageVolumeDescriptionGet(volumeID)
-				if err != nil {
-					return err
-				}
-
-				dbArgs := &db.StorageVolumeArgs{
-					Name:        fmt.Sprintf("%s/%s", vol.Name, snapOnlyName),
-					PoolName:    poolName,
-					TypeName:    vol.Type,
-					Snapshot:    true,
-					Config:      vol.Config,
-					Description: volumeDescription,
-				}
-
-				_, err = storagePoolVolumeSnapshotDBCreateInternal(state, dbArgs)
+				_, err := storagePoolVolumeSnapshotCreateInternal(state, poolName, vol, shared.ExtractSnapshotName(snap))
 				if err != nil {
 					return err
 				}
@@ -672,6 +646,41 @@ func storagePoolVolumeCreateInternal(state *state.State, poolName string, vol *a
 	return nil
 }
 
+func storagePoolVolumeSnapshotCreateInternal(state *state.State, poolName string, vol *api.StorageVolumesPost, snapshotName string) (storage, error) {
+	volumeType, err := storagePoolVolumeTypeNameToType(vol.Type)
+	if err != nil {
+		return nil, err
+	}
+
+	fullSnapshotName := fmt.Sprintf("%s/%s", vol.Name, snapshotName)
+
+	sourcePoolID, err := state.Cluster.StoragePoolGetID(vol.Source.Pool)
+	if err != nil {
+		return nil, err
+	}
+
+	volumeID, err := state.Cluster.StoragePoolNodeVolumeGetTypeID(fullSnapshotName, volumeType, sourcePoolID)
+	if err != nil {
+		return nil, err
+	}
+
+	volumeDescription, err := state.Cluster.StorageVolumeDescriptionGet(volumeID)
+	if err != nil {
+		return nil, err
+	}
+
+	dbArgs := &db.StorageVolumeArgs{
+		Name:        fmt.Sprintf("%s/%s", vol.Name, snapshotName),
+		PoolName:    poolName,
+		TypeName:    vol.Type,
+		Snapshot:    true,
+		Config:      vol.Config,
+		Description: volumeDescription,
+	}
+
+	return storagePoolVolumeSnapshotDBCreateInternal(state, dbArgs)
+}
+
 func storagePoolVolumeSnapshotDBCreateInternal(state *state.State, dbArgs *db.StorageVolumeArgs) (storage, error) {
 	// Create database entry for new storage volume.
 	err := storagePoolVolumeDBCreate(state, dbArgs.PoolName, dbArgs.Name, dbArgs.Description, dbArgs.TypeName, true, dbArgs.Config)
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 735c55de7d..ddf6a1cf60 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -3361,7 +3361,7 @@ func (s *storageZfs) StoragePoolVolumeCopy(source *api.StorageVolumeSource) erro
 	return nil
 }
 
-func (s *zfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage) error {
+func (s *zfsMigrationSourceDriver) SendStorageVolume(conn *websocket.Conn, op *operation, bwlimit string, storage storage, volumeOnly bool) error {
 	msg := fmt.Sprintf("Function not implemented")
 	logger.Errorf(msg)
 	return fmt.Errorf(msg)

From 6a98a7665fc0a84fee13d0d683f690875316da26 Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Thu, 11 Apr 2019 09:45:43 +0200
Subject: [PATCH 5/6] client: Consider volumeOnly option when migrating

Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
 client/lxd_storage_volumes.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/client/lxd_storage_volumes.go b/client/lxd_storage_volumes.go
index 5d47a96f7c..f0f3f61328 100644
--- a/client/lxd_storage_volumes.go
+++ b/client/lxd_storage_volumes.go
@@ -402,9 +402,10 @@ func (r *ProtocolLXD) CopyStoragePoolVolume(pool string, source ContainerServer,
 	}
 
 	sourceReq := api.StorageVolumePost{
-		Migration: true,
-		Name:      volume.Name,
-		Pool:      sourcePool,
+		Migration:  true,
+		Name:       volume.Name,
+		Pool:       sourcePool,
+		VolumeOnly: args.VolumeOnly,
 	}
 
 	// Push mode migration

From e77033904076a1fa891f7619ece5b0dc99289fec Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.hipp at canonical.com>
Date: Thu, 11 Apr 2019 10:20:24 +0200
Subject: [PATCH 6/6] tests: Extend migration tests

Add tests for copying and moving storage volumes with snapshots.

Signed-off-by: Thomas Hipp <thomas.hipp at canonical.com>
---
 test/suites/migration.sh | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/test/suites/migration.sh b/test/suites/migration.sh
index b6e9a3c34a..15d48d3ec4 100644
--- a/test/suites/migration.sh
+++ b/test/suites/migration.sh
@@ -279,34 +279,58 @@ migration() {
   remote_pool2="lxdtest-$(basename "${lxd2_dir}")"
 
   lxc_remote storage volume create l1:"$remote_pool1" vol1
+  lxc_remote storage volume create l1:"$remote_pool1" vol2
+  lxc_remote storage volume snapshot l1:"$remote_pool1" vol2
 
   # remote storage volume migration in "pull" mode
   lxc_remote storage volume copy l1:"$remote_pool1/vol1" l2:"$remote_pool2/vol2"
   lxc_remote storage volume move l1:"$remote_pool1/vol1" l2:"$remote_pool2/vol3"
   ! lxc_remote storage volume list l1:"$remote_pool1/vol1" || false
+  lxc_remote storage volume copy l1:"$remote_pool1/vol2" l2:"$remote_pool2/vol4" --volume-only
+  lxc_remote storage volume copy l1:"$remote_pool1/vol2" l2:"$remote_pool2/vol5"
+  lxc_remote storage volume move l1:"$remote_pool1/vol2" l2:"$remote_pool2/vol6"
 
   lxc_remote storage volume delete l2:"$remote_pool2" vol2
   lxc_remote storage volume delete l2:"$remote_pool2" vol3
+  lxc_remote storage volume delete l2:"$remote_pool2" vol4
+  lxc_remote storage volume delete l2:"$remote_pool2" vol5
+  lxc_remote storage volume delete l2:"$remote_pool2" vol6
 
   # remote storage volume migration in "push" mode
   lxc_remote storage volume create l1:"$remote_pool1" vol1
+  lxc_remote storage volume create l1:"$remote_pool1" vol2
+  lxc_remote storage volume snapshot l1:"$remote_pool1" vol2
 
   lxc_remote storage volume copy l1:"$remote_pool1/vol1" l2:"$remote_pool2/vol2" --mode=push
   lxc_remote storage volume move l1:"$remote_pool1/vol1" l2:"$remote_pool2/vol3" --mode=push
   ! lxc_remote storage volume list l1:"$remote_pool1/vol1" || false
+  lxc_remote storage volume copy l1:"$remote_pool1/vol2" l2:"$remote_pool2/vol4" --volume-only --mode=push
+  lxc_remote storage volume copy l1:"$remote_pool1/vol2" l2:"$remote_pool2/vol5" --mode=push
+  lxc_remote storage volume move l1:"$remote_pool1/vol2" l2:"$remote_pool2/vol6" --mode=push
 
   lxc_remote storage volume delete l2:"$remote_pool2" vol2
   lxc_remote storage volume delete l2:"$remote_pool2" vol3
+  lxc_remote storage volume delete l2:"$remote_pool2" vol4
+  lxc_remote storage volume delete l2:"$remote_pool2" vol5
+  lxc_remote storage volume delete l2:"$remote_pool2" vol6
 
   # remote storage volume migration in "relay" mode
   lxc_remote storage volume create l1:"$remote_pool1" vol1
+  lxc_remote storage volume create l1:"$remote_pool1" vol2
+  lxc_remote storage volume snapshot l1:"$remote_pool1" vol2
 
   lxc_remote storage volume copy l1:"$remote_pool1/vol1" l2:"$remote_pool2/vol2" --mode=relay
   lxc_remote storage volume move l1:"$remote_pool1/vol1" l2:"$remote_pool2/vol3" --mode=relay
   ! lxc_remote storage volume list l1:"$remote_pool1/vol1" || false
+  lxc_remote storage volume copy l1:"$remote_pool1/vol2" l2:"$remote_pool2/vol4" --volume-only --mode=relay
+  lxc_remote storage volume copy l1:"$remote_pool1/vol2" l2:"$remote_pool2/vol5" --mode=relay
+  lxc_remote storage volume move l1:"$remote_pool1/vol2" l2:"$remote_pool2/vol6" --mode=relay
 
   lxc_remote storage volume delete l2:"$remote_pool2" vol2
   lxc_remote storage volume delete l2:"$remote_pool2" vol3
+  lxc_remote storage volume delete l2:"$remote_pool2" vol4
+  lxc_remote storage volume delete l2:"$remote_pool2" vol5
+  lxc_remote storage volume delete l2:"$remote_pool2" vol6
 
   # Test some migration between projects
   lxc_remote project create l1:proj -c features.images=false -c features.profiles=false


More information about the lxc-devel mailing list