[lxc-devel] [lxd/master] Fix cross fs migration

tych0 on Github lxc-bot@linuxcontainers.org
Thu Sep 15 21:51:48 UTC 2016


From f36ac9dfad74f98b94f8588f1911eaa0cf8afecd Mon Sep 17 00:00:00 2001
From: Tycho Andersen <tycho.andersen@canonical.com>
Date: Thu, 15 Sep 2016 13:13:30 +0000
Subject: [PATCH 1/2] actually support copying across different CoW-based
 backend types

Closes #2359

Signed-off-by: Tycho Andersen <tycho.andersen@canonical.com>
---
 lxd/migrate.go       |  35 +---------------
 lxd/storage.go       | 112 +++++++++++++++++++++++++++++++++++++++++----------
 lxd/storage_btrfs.go |  27 +++++++++++--
 lxd/storage_dir.go   |   4 +-
 lxd/storage_lvm.go   |   4 +-
 lxd/storage_zfs.go   |  66 +++++++++++++++++++++++++-----
 6 files changed, 176 insertions(+), 72 deletions(-)
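
For reviewers, a minimal, self-contained sketch (not part of the patch) of
the receive ordering this change establishes, mirroring the new
SendWhileRunning order: the sink now gets plain snapshot names, creates
each snapshot on its own backend as it arrives, and receives the
container's rootfs last. The sink type and receiveObject below are
illustrative stand-ins for LXD's storage drivers and RsyncRecv, and the
paths are hypothetical:

package main

import "fmt"

type sink struct{ received []string }

// receiveObject stands in for RsyncRecv plus the idmap shift.
func (s *sink) receiveObject(path string) {
	s.received = append(s.received, path)
}

func (s *sink) migrationSink(name string, snapshots []string) {
	for _, snap := range snapshots {
		// Each snapshot is created empty on the target, then filled
		// over the wire, before the next one starts.
		s.receiveObject("/var/lib/lxd/snapshots/" + name + "/" + snap)
	}
	// The container itself always comes last, so the final state on
	// the wire wins.
	s.receiveObject("/var/lib/lxd/containers/" + name)
}

func main() {
	s := &sink{}
	s.migrationSink("c1", []string{"snap0", "snap1"})
	fmt.Println(s.received)
}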

diff --git a/lxd/migrate.go b/lxd/migrate.go
index 5377a61..a5ad1ad 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -582,32 +582,6 @@ func (c *migrationSink) do() error {
 		imagesDir := ""
 		srcIdmap := new(shared.IdmapSet)
 
-		snapshots := []container{}
-		for _, snap := range header.Snapshots {
-			// TODO: we need to propagate snapshot configurations
-			// as well. Right now the container configuration is
-			// done through the initial migration post. Should we
-			// post the snapshots and their configs as well, or do
-			// it some other way?
-			name := c.container.Name() + shared.SnapshotDelimiter + snap
-			args := containerArgs{
-				Ctype:        cTypeSnapshot,
-				Config:       c.container.LocalConfig(),
-				Profiles:     c.container.Profiles(),
-				Ephemeral:    c.container.IsEphemeral(),
-				Architecture: c.container.Architecture(),
-				Devices:      c.container.LocalDevices(),
-				Name:         name,
-			}
-
-			ct, err := containerCreateEmptySnapshot(c.container.Daemon(), args)
-			if err != nil {
-				restore <- err
-				return
-			}
-			snapshots = append(snapshots, ct)
-		}
-
 		for _, idmap := range header.Idmap {
 			e := shared.IdmapEntry{
 				Isuid:    *idmap.Isuid,
@@ -626,7 +600,7 @@ func (c *migrationSink) do() error {
 		 */
 		fsTransfer := make(chan error)
 		go func() {
-			if err := mySink(c.live, c.container, snapshots, c.fsConn); err != nil {
+			if err := mySink(c.live, c.container, header.Snapshots, c.fsConn, srcIdmap); err != nil {
 				fsTransfer <- err
 				return
 			}
@@ -670,13 +644,6 @@ func (c *migrationSink) do() error {
 
 		}
 
-		for _, snap := range snapshots {
-			if err := ShiftIfNecessary(snap, srcIdmap); err != nil {
-				restore <- err
-				return
-			}
-		}
-
 		restore <- nil
 	}(c)
 
diff --git a/lxd/storage.go b/lxd/storage.go
index 7d92d16..4af2b28 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -192,7 +192,7 @@ type storage interface {
 	// already present on the target instance as an exercise for the
 	// enterprising developer.
 	MigrationSource(container container) (MigrationStorageSourceDriver, error)
-	MigrationSink(live bool, container container, objects []container, conn *websocket.Conn) error
+	MigrationSink(live bool, container container, objects []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error
 }
 
 func newStorage(d *Daemon, sType storageType) (storage, error) {
@@ -556,19 +556,15 @@ func (lw *storageLogWrapper) MigrationSource(container container) (MigrationStor
 	return lw.w.MigrationSource(container)
 }
 
-func (lw *storageLogWrapper) MigrationSink(live bool, container container, objects []container, conn *websocket.Conn) error {
-	objNames := []string{}
-	for _, obj := range objects {
-		objNames = append(objNames, obj.Name())
-	}
-
+func (lw *storageLogWrapper) MigrationSink(live bool, container container, objects []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
 	lw.log.Debug("MigrationSink", log.Ctx{
 		"live":      live,
 		"container": container.Name(),
-		"objects":   objNames,
+		"objects":   objects,
+		"srcIdmap":  *srcIdmap,
 	})
 
-	return lw.w.MigrationSink(live, container, objects, conn)
+	return lw.w.MigrationSink(live, container, objects, conn, srcIdmap)
 }
 
 func ShiftIfNecessary(container container, srcIdmap *shared.IdmapSet) error {
@@ -608,9 +604,17 @@ func (s rsyncStorageSourceDriver) Snapshots() []container {
 }
 
 func (s rsyncStorageSourceDriver) SendWhileRunning(conn *websocket.Conn) error {
-	toSend := append([]container{s.container}, s.snapshots...)
+	toSend := []container{}
+	toSend = append(toSend, s.snapshots...)
+	toSend = append(toSend, s.container)
 
 	for _, send := range toSend {
+		if send.IsSnapshot() {
+			if err := send.StorageStart(); err != nil {
+				return err
+			}
+			defer send.StorageStop()
+		}
 		path := send.Path()
 		if err := RsyncSend(shared.AddSlash(path), conn); err != nil {
 			return err
@@ -638,21 +642,83 @@ func rsyncMigrationSource(container container) (MigrationStorageSourceDriver, er
 	return rsyncStorageSourceDriver{container, snapshots}, nil
 }
 
-func rsyncMigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error {
-	/* the first object is the actual container */
-	if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil {
-		return err
-	}
+func rsyncMigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+	isDirBackend := container.Storage().GetStorageType() == storageTypeDir
 
-	if len(snapshots) > 0 {
-		err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700)
-		if err != nil {
+	if isDirBackend {
+		if len(snapshots) > 0 {
+			err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700)
+			if err != nil {
+				return err
+			}
+		}
+		for _, snap := range snapshots {
+			// TODO: we need to propagate snapshot configurations
+			// as well. Right now the container configuration is
+			// done through the initial migration post. Should we
+			// post the snapshots and their configs as well, or do
+			// it some other way?
+			name := container.Name() + shared.SnapshotDelimiter + snap
+			args := containerArgs{
+				Ctype:        cTypeSnapshot,
+				Config:       container.LocalConfig(),
+				Profiles:     container.Profiles(),
+				Ephemeral:    container.IsEphemeral(),
+				Architecture: container.Architecture(),
+				Devices:      container.LocalDevices(),
+				Name:         name,
+			}
+
+			s, err := containerCreateEmptySnapshot(container.Daemon(), args)
+			if err != nil {
+				return err
+			}
+
+			if err := RsyncRecv(shared.AddSlash(s.Path()), conn); err != nil {
+				return err
+			}
+
+			if err := ShiftIfNecessary(s, srcIdmap); err != nil {
+				return err
+			}
+		}
+
+		if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil {
 			return err
 		}
-	}
+	} else {
+		for _, snap := range snapshots {
+			if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil {
+				return err
+			}
 
-	for _, snap := range snapshots {
-		if err := RsyncRecv(shared.AddSlash(snap.Path()), conn); err != nil {
+			if err := ShiftIfNecessary(container, srcIdmap); err != nil {
+				return err
+			}
+
+			// TODO: we need to propagate snapshot configurations
+			// as well. Right now the container configuration is
+			// done through the initial migration post. Should we
+			// post the snapshots and their configs as well, or do
+			// it some other way?
+			name := container.Name() + shared.SnapshotDelimiter + snap
+			args := containerArgs{
+				Ctype:        cTypeSnapshot,
+				Config:       container.LocalConfig(),
+				Profiles:     container.Profiles(),
+				Ephemeral:    container.IsEphemeral(),
+				Architecture: container.Architecture(),
+				Devices:      container.LocalDevices(),
+				Name:         name,
+			}
+
+			_, err := containerCreateAsSnapshot(container.Daemon(), args, container)
+			if err != nil {
+				return err
+			}
+		}
+
+		if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil {
 			return err
 		}
 	}
@@ -664,6 +730,10 @@ func rsyncMigrationSink(live bool, container container, snapshots []container, c
 		}
 	}
 
+	if err := ShiftIfNecessary(container, srcIdmap); err != nil {
+		return err
+	}
+
 	return nil
 }
 
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 79b1aa0..29a92fd 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -985,9 +985,9 @@ func (s *storageBtrfs) MigrationSource(c container) (MigrationStorageSourceDrive
 	return driver, nil
 }
 
-func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error {
+func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
 	if runningInUserns {
-		return rsyncMigrationSink(live, container, snapshots, conn)
+		return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap)
 	}
 
 	cName := container.Name()
@@ -1057,7 +1057,28 @@ func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots [
 	}
 
 	for _, snap := range snapshots {
-		if err := btrfsRecv(containerPath(cName, true), snap.Path(), true); err != nil {
+		// TODO: we need to propagate snapshot configurations
+		// as well. Right now the container configuration is
+		// done through the initial migration post. Should we
+		// post the snapshots and their configs as well, or do
+		// it some other way?
+		name := container.Name() + shared.SnapshotDelimiter + snap
+		args := containerArgs{
+			Ctype:        cTypeSnapshot,
+			Config:       container.LocalConfig(),
+			Profiles:     container.Profiles(),
+			Ephemeral:    container.IsEphemeral(),
+			Architecture: container.Architecture(),
+			Devices:      container.LocalDevices(),
+			Name:         name,
+		}
+
+		s, err := containerCreateEmptySnapshot(container.Daemon(), args)
+		if err != nil {
+			return err
+		}
+
+		if err := btrfsRecv(containerPath(cName, true), s.Path(), true); err != nil {
 			return err
 		}
 	}
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 7cfe0a8..8c3d33d 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -282,6 +282,6 @@ func (s *storageDir) MigrationSource(container container) (MigrationStorageSourc
 	return rsyncMigrationSource(container)
 }
 
-func (s *storageDir) MigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error {
-	return rsyncMigrationSink(live, container, snapshots, conn)
+func (s *storageDir) MigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+	return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap)
 }
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 2799032..de49050 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -972,6 +972,6 @@ func (s *storageLvm) MigrationSource(container container) (MigrationStorageSourc
 	return rsyncMigrationSource(container)
 }
 
-func (s *storageLvm) MigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error {
-	return rsyncMigrationSink(live, container, snapshots, conn)
+func (s *storageLvm) MigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+	return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap)
 }
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 27ac0e1..c6fd3e6 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -79,18 +79,44 @@ func (s *storageZfs) Init(config map[string]interface{}) (storage, error) {
 
 // Things we don't need to care about
 func (s *storageZfs) ContainerStart(container container) error {
-	fs := fmt.Sprintf("containers/%s", container.Name())
 
-	// Just in case the container filesystem got unmounted
-	if !shared.IsMountPoint(shared.VarPath(fs)) {
-		s.zfsMount(fs)
+	if !container.IsSnapshot() {
+		fs := fmt.Sprintf("containers/%s", container.Name())
+
+		// Just in case the container filesystem got unmounted
+		if !shared.IsMountPoint(shared.VarPath(fs)) {
+			s.zfsMount(fs)
+		}
+	} else {
+		/* the zfs CLI tool doesn't support mounting snapshots, but we
+		 * can mount them with the syscall directly...
+		 */
+		fields := strings.SplitN(container.Name(), shared.SnapshotDelimiter, 2)
+		if len(fields) != 2 {
+			return fmt.Errorf("invalid snapshot name %s", container.Name())
+		}
+
+		src := fmt.Sprintf("containers/%s@%s", fields[0], fields[1])
+		dest := shared.VarPath("snapshots", fields[0], fields[1])
+
+		return tryMount(src, dest, "zfs", 0, "")
 	}
 
 	return nil
 }
 
 func (s *storageZfs) ContainerStop(container container) error {
-	return nil
+	if !container.IsSnapshot() {
+		return nil
+	}
+
+	fields := strings.SplitN(container.Name(), shared.SnapshotDelimiter, 2)
+	if len(fields) != 2 {
+		return fmt.Errorf("invalid snapshot name %s", container.Name())
+	}
+
+	p := shared.VarPath("snapshots", fields[0], fields[1])
+	return tryUnmount(p, 0)
 }
 
 // Things we do have to care about
@@ -1373,7 +1399,7 @@ func (s *storageZfs) MigrationSource(ct container) (MigrationStorageSourceDriver
 	return &driver, nil
 }
 
-func (s *storageZfs) MigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error {
+func (s *storageZfs) MigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
 	zfsRecv := func(zfsName string) error {
 		zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName)
 		args := []string{"receive", "-F", "-u", zfsFsName}
@@ -1420,18 +1446,38 @@ func (s *storageZfs) MigrationSink(live bool, container container, snapshots []c
 	}
 
 	for _, snap := range snapshots {
-		fields := strings.SplitN(snap.Name(), shared.SnapshotDelimiter, 2)
-		name := fmt.Sprintf("containers/%s@snapshot-%s", fields[0], fields[1])
+		// TODO: we need to propagate snapshot configurations
+		// as well. Right now the container configuration is
+		// done through the initial migration post. Should we
+		// post the snapshots and their configs as well, or do
+		// it some other way?
+		snapName := container.Name() + shared.SnapshotDelimiter + snap
+		args := containerArgs{
+			Ctype:        cTypeSnapshot,
+			Config:       container.LocalConfig(),
+			Profiles:     container.Profiles(),
+			Ephemeral:    container.IsEphemeral(),
+			Architecture: container.Architecture(),
+			Devices:      container.LocalDevices(),
+			Name:         snapName,
+		}
+
+		_, err := containerCreateEmptySnapshot(container.Daemon(), args)
+		if err != nil {
+			return err
+		}
+
+		name := fmt.Sprintf("containers/%s@snapshot-%s", container.Name(), snap)
 		if err := zfsRecv(name); err != nil {
 			return err
 		}
 
-		err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", fields[0])), 0700)
+		err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700)
 		if err != nil {
 			return err
 		}
 
-		err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", fields[0], fields[1])))
+		err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", container.Name(), snap)))
 		if err != nil {
 			return err
 		}
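
A note on the ZFS hunk above: zfs(8) has no way to mount a snapshot, so
ContainerStart falls back to the mount syscall with a "pool/fs@snap"
source. tryMount is LXD's retry wrapper around that call; a bare,
self-contained equivalent (pool name and paths are illustrative) looks
roughly like:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// mountZfsSnapshot mounts a ZFS snapshot the way the kernel accepts it,
// since the zfs CLI refuses to. ZFS snapshots are read-only, so pass
// MS_RDONLY explicitly.
func mountZfsSnapshot(pool, container, snap, dest string) error {
	src := fmt.Sprintf("%s/containers/%s@%s", pool, container, snap)
	return unix.Mount(src, dest, "zfs", unix.MS_RDONLY, "")
}

func main() {
	err := mountZfsSnapshot("lxd", "c1", "snap0", "/var/lib/lxd/snapshots/c1/snap0")
	if err != nil {
		fmt.Fprintln(os.Stderr, "mount failed:", err)
		os.Exit(1)
	}
}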

From 7e9cabb0dcf8d37790db723eb18be8c30f74451e Mon Sep 17 00:00:00 2001
From: Tycho Andersen <tycho.andersen@canonical.com>
Date: Thu, 15 Sep 2016 20:51:31 +0000
Subject: [PATCH 2/2] container copy: preserve snapshot configuration

Previously, we weren't preserving snapshot configuration (as noted in the
TODO). Let's do that.

Signed-off-by: Tycho Andersen <tycho.andersen@canonical.com>
---
 lxd/migrate.go           |  51 +++++++++++++++++++++-
 lxd/migrate.pb.go        | 111 ++++++++++++++++++++++++++++++++++++++++++++++-
 lxd/migrate.proto        |  54 +++++++++++++++--------
 lxd/storage.go           |  75 +++++++++++++++++---------------
 lxd/storage_btrfs.go     |  19 +-------
 lxd/storage_dir.go       |   2 +-
 lxd/storage_lvm.go       |   2 +-
 lxd/storage_zfs.go       |  19 +-------
 test/suites/migration.sh |   4 ++
 9 files changed, 243 insertions(+), 94 deletions(-)
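
For reviewers, a self-contained sketch (not part of the patch) of the
round trip this change adds: the source flattens each snapshot's config
into repeated Config messages in snapshotToProtobuf, and the sink folds
them back into container arguments in snapshotProtobufToContainerArgs.
Plain structs stand in for the generated migrate.pb.go types, which use
pointer fields:

package main

import "fmt"

type config struct{ key, value string }

type snapshot struct {
	name   string
	config []config
}

// toWire mirrors snapshotToProtobuf, for the config map only.
func toWire(name string, conf map[string]string) snapshot {
	s := snapshot{name: name}
	for k, v := range conf {
		s.config = append(s.config, config{k, v})
	}
	return s
}

// fromWire mirrors snapshotProtobufToContainerArgs, for the config map only.
func fromWire(s snapshot) map[string]string {
	conf := map[string]string{}
	for _, ent := range s.config {
		conf[ent.key] = ent.value
	}
	return conf
}

func main() {
	snap := toWire("snap0", map[string]string{"user.tester": "foo"})
	fmt.Println(fromWire(snap)["user.tester"]) // prints "foo", as migration.sh checks
}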

diff --git a/lxd/migrate.go b/lxd/migrate.go
index a5ad1ad..416438a 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -250,6 +250,37 @@ fi
 	return err
 }
 
+func snapshotToProtobuf(c container) *Snapshot {
+	config := []*Config{}
+	for k, v := range c.LocalConfig() {
+		kCopy := string(k)
+		vCopy := string(v)
+		config = append(config, &Config{Key: &kCopy, Value: &vCopy})
+	}
+
+	devices := []*Device{}
+	for name, d := range c.LocalDevices() {
+		props := []*Config{}
+		for k, v := range d {
+			kCopy := string(k)
+			vCopy := string(v)
+			props = append(props, &Config{Key: &kCopy, Value: &vCopy})
+		}
+
+		devices = append(devices, &Device{Name: proto.String(name), Config: props})
+	}
+
+	parts := strings.SplitN(c.Name(), shared.SnapshotDelimiter, 2)
+	isEphemeral := c.IsEphemeral()
+	return &Snapshot{
+		Name:      &parts[len(parts)-1],
+		Config:    config,
+		Profiles:  c.Profiles(),
+		Ephemeral: &isEphemeral,
+		Devices:   devices,
+	}
+}
+
 func (s *migrationSourceWs) Do(migrateOp *operation) error {
 	<-s.allConnected
 
@@ -286,11 +317,11 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 	/* the protocol says we have to send a header no matter what, so let's
 	 * do that, but then immediately send an error.
 	 */
-	snapshots := []string{}
+	snapshots := []*Snapshot{}
 	if fsErr == nil {
 		fullSnaps := driver.Snapshots()
 		for _, snap := range fullSnaps {
-			snapshots = append(snapshots, shared.ExtractSnapshotName(snap.Name()))
+			snapshots = append(snapshots, snapshotToProtobuf(snap))
 		}
 	}
 
@@ -600,6 +631,22 @@ func (c *migrationSink) do() error {
 		 */
 		fsTransfer := make(chan error)
 		go func() {
+			snapshots := []*Snapshot{}
+
+			/* Legacy: we only sent the snapshot names, so we just
+			 * copy the container's config over, same as we used to
+			 * do.
+			 */
+			if len(header.SnapshotNames) > 0 {
+				for _, name := range header.SnapshotNames {
+					base := snapshotToProtobuf(c.container)
+					base.Name = proto.String(name)
+					snapshots = append(snapshots, base)
+				}
+			} else {
+				snapshots = header.Snapshots
+			}
+
-			if err := mySink(c.live, c.container, header.Snapshots, c.fsConn, srcIdmap); err != nil {
+			if err := mySink(c.live, c.container, snapshots, c.fsConn, srcIdmap); err != nil {
 				fsTransfer <- err
 				return
diff --git a/lxd/migrate.pb.go b/lxd/migrate.pb.go
index 751c0db..ae11974 100644
--- a/lxd/migrate.pb.go
+++ b/lxd/migrate.pb.go
@@ -10,6 +10,9 @@ It is generated from these files:
 
 It has these top-level messages:
 	IDMapType
+	Config
+	Device
+	Snapshot
 	MigrationHeader
 	MigrationControl
 */
@@ -139,11 +142,108 @@ func (m *IDMapType) GetMaprange() int32 {
 	return 0
 }
 
+type Config struct {
+	Key              *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+	Value            *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Config) Reset()         { *m = Config{} }
+func (m *Config) String() string { return proto.CompactTextString(m) }
+func (*Config) ProtoMessage()    {}
+
+func (m *Config) GetKey() string {
+	if m != nil && m.Key != nil {
+		return *m.Key
+	}
+	return ""
+}
+
+func (m *Config) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+type Device struct {
+	Name             *string   `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Config           []*Config `protobuf:"bytes,2,rep,name=config" json:"config,omitempty"`
+	XXX_unrecognized []byte    `json:"-"`
+}
+
+func (m *Device) Reset()         { *m = Device{} }
+func (m *Device) String() string { return proto.CompactTextString(m) }
+func (*Device) ProtoMessage()    {}
+
+func (m *Device) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Device) GetConfig() []*Config {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+type Snapshot struct {
+	Name             *string   `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Config           []*Config `protobuf:"bytes,2,rep,name=config" json:"config,omitempty"`
+	Profiles         []string  `protobuf:"bytes,3,rep,name=profiles" json:"profiles,omitempty"`
+	Ephemeral        *bool     `protobuf:"varint,4,req,name=ephemeral" json:"ephemeral,omitempty"`
+	Devices          []*Device `protobuf:"bytes,5,rep,name=devices" json:"devices,omitempty"`
+	XXX_unrecognized []byte    `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+
+func (m *Snapshot) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Snapshot) GetConfig() []*Config {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *Snapshot) GetProfiles() []string {
+	if m != nil {
+		return m.Profiles
+	}
+	return nil
+}
+
+func (m *Snapshot) GetEphemeral() bool {
+	if m != nil && m.Ephemeral != nil {
+		return *m.Ephemeral
+	}
+	return false
+}
+
+func (m *Snapshot) GetDevices() []*Device {
+	if m != nil {
+		return m.Devices
+	}
+	return nil
+}
+
 type MigrationHeader struct {
 	Fs               *MigrationFSType `protobuf:"varint,1,req,name=fs,enum=main.MigrationFSType" json:"fs,omitempty"`
 	Criu             *CRIUType        `protobuf:"varint,2,opt,name=criu,enum=main.CRIUType" json:"criu,omitempty"`
 	Idmap            []*IDMapType     `protobuf:"bytes,3,rep,name=idmap" json:"idmap,omitempty"`
-	Snapshots        []string         `protobuf:"bytes,4,rep,name=snapshots" json:"snapshots,omitempty"`
+	SnapshotNames    []string         `protobuf:"bytes,4,rep,name=snapshotNames" json:"snapshotNames,omitempty"`
+	Snapshots        []*Snapshot      `protobuf:"bytes,5,rep,name=snapshots" json:"snapshots,omitempty"`
 	XXX_unrecognized []byte           `json:"-"`
 }
 
@@ -172,7 +272,14 @@ func (m *MigrationHeader) GetIdmap() []*IDMapType {
 	return nil
 }
 
-func (m *MigrationHeader) GetSnapshots() []string {
+func (m *MigrationHeader) GetSnapshotNames() []string {
+	if m != nil {
+		return m.SnapshotNames
+	}
+	return nil
+}
+
+func (m *MigrationHeader) GetSnapshots() []*Snapshot {
 	if m != nil {
 		return m.Snapshots
 	}
diff --git a/lxd/migrate.proto b/lxd/migrate.proto
index bdf5608..6140fb4 100644
--- a/lxd/migrate.proto
+++ b/lxd/migrate.proto
@@ -1,35 +1,53 @@
 package main;
 
 enum MigrationFSType {
-  RSYNC     = 0;
-  BTRFS     = 1;
-  ZFS       = 2;
+	RSYNC		= 0;
+	BTRFS		= 1;
+	ZFS		= 2;
 }
 
 enum CRIUType {
-  CRIU_RSYNC    = 0;
-  PHAUL         = 1;
+	CRIU_RSYNC	= 0;
+	PHAUL		= 1;
 }
 
 message IDMapType {
-  required bool   isuid       = 1;
-  required bool   isgid       = 2;
-  required int32  hostid      = 3;
-  required int32  nsid        = 4;
-  required int32  maprange    = 5;
+	required bool	isuid			= 1;
+	required bool	isgid			= 2;
+	required int32	hostid			= 3;
+	required int32	nsid			= 4;
+	required int32	maprange		= 5;
 }
 
-message MigrationHeader {
-  required MigrationFSType  fs        = 1;
-  optional CRIUType         criu      = 2;
-  repeated IDMapType        idmap     = 3;
+message Config {
+	required string		key	= 1;
+	required string		value	= 2;
+}
+
+message Device {
+	required string		name	= 1;
+	repeated Config		config	= 2;
+}
 
-  repeated string           snapshots = 4;
+message Snapshot {
+	required string			name		= 1;
+	repeated Config 		config		= 2;
+	repeated string			profiles	= 3;
+	required bool			ephemeral	= 4;
+	repeated Device			devices		= 5;
+}
+
+message MigrationHeader {
+	required MigrationFSType		fs		= 1;
+	optional CRIUType			criu		= 2;
+	repeated IDMapType	 		idmap		= 3;
+	repeated string				snapshotNames	= 4;
+	repeated Snapshot			snapshots	= 5;
 }
 
 message MigrationControl {
-  required bool     success     = 1;
+	required bool		success		= 1;
 
-  /* optional failure message if sending a failure */
-  optional string   message     = 2;
+	/* optional failure message if sending a failure */
+	optional string		message		= 2;
 }
diff --git a/lxd/storage.go b/lxd/storage.go
index 4af2b28..e58ee72 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -192,7 +192,7 @@ type storage interface {
 	// already present on the target instance as an exercise for the
 	// enterprising developer.
 	MigrationSource(container container) (MigrationStorageSourceDriver, error)
-	MigrationSink(live bool, container container, objects []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error
+	MigrationSink(live bool, container container, objects []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error
 }
 
 func newStorage(d *Daemon, sType storageType) (storage, error) {
@@ -556,11 +556,16 @@ func (lw *storageLogWrapper) MigrationSource(container container) (MigrationStor
 	return lw.w.MigrationSource(container)
 }
 
-func (lw *storageLogWrapper) MigrationSink(live bool, container container, objects []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+func (lw *storageLogWrapper) MigrationSink(live bool, container container, objects []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+	objNames := []string{}
+	for _, obj := range objects {
+		objNames = append(objNames, obj.GetName())
+	}
+
 	lw.log.Debug("MigrationSink", log.Ctx{
 		"live":      live,
 		"container": container.Name(),
-		"objects":   objects,
+		"objects":   objNames,
 		"srcIdmap":  *srcIdmap,
 	})
 
@@ -642,7 +647,35 @@ func rsyncMigrationSource(container container) (MigrationStorageSourceDriver, er
 	return rsyncStorageSourceDriver{container, snapshots}, nil
 }
 
-func rsyncMigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+func snapshotProtobufToContainerArgs(containerName string, snap *Snapshot) containerArgs {
+	config := map[string]string{}
+
+	for _, ent := range snap.Config {
+		config[ent.GetKey()] = ent.GetValue()
+	}
+
+	devices := shared.Devices{}
+	for _, ent := range snap.Devices {
+		props := map[string]string{}
+		for _, prop := range ent.Config {
+			props[prop.GetKey()] = prop.GetValue()
+		}
+
+		devices[ent.GetName()] = props
+	}
+
+	name := containerName + shared.SnapshotDelimiter + snap.GetName()
+	return containerArgs{
+		Name:      name,
+		Ctype:     cTypeSnapshot,
+		Config:    config,
+		Profiles:  snap.Profiles,
+		Ephemeral: snap.GetEphemeral(),
+		Devices:   devices,
+	}
+}
+
+func rsyncMigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
 	isDirBackend := container.Storage().GetStorageType() == storageTypeDir
 
 	if isDirBackend {
@@ -653,22 +686,7 @@ func rsyncMigrationSink(live bool, container container, snapshots []string, conn
 			}
 		}
 		for _, snap := range snapshots {
-			// TODO: we need to propagate snapshot configurations
-			// as well. Right now the container configuration is
-			// done through the initial migration post. Should we
-			// post the snapshots and their configs as well, or do
-			// it some other way?
-			name := container.Name() + shared.SnapshotDelimiter + snap
-			args := containerArgs{
-				Ctype:        cTypeSnapshot,
-				Config:       container.LocalConfig(),
-				Profiles:     container.Profiles(),
-				Ephemeral:    container.IsEphemeral(),
-				Architecture: container.Architecture(),
-				Devices:      container.LocalDevices(),
-				Name:         name,
-			}
-
+			args := snapshotProtobufToContainerArgs(container.Name(), snap)
 			s, err := containerCreateEmptySnapshot(container.Daemon(), args)
 			if err != nil {
 				return err
@@ -696,22 +714,7 @@ func rsyncMigrationSink(live bool, container container, snapshots []string, conn
 				return err
 			}
 
-			// TODO: we need to propagate snapshot configurations
-			// as well. Right now the container configuration is
-			// done through the initial migration post. Should we
-			// post the snapshots and their configs as well, or do
-			// it some other way?
-			name := container.Name() + shared.SnapshotDelimiter + snap
-			args := containerArgs{
-				Ctype:        cTypeSnapshot,
-				Config:       container.LocalConfig(),
-				Profiles:     container.Profiles(),
-				Ephemeral:    container.IsEphemeral(),
-				Architecture: container.Architecture(),
-				Devices:      container.LocalDevices(),
-				Name:         name,
-			}
-
+			args := snapshotProtobufToContainerArgs(container.Name(), snap)
 			_, err := containerCreateAsSnapshot(container.Daemon(), args, container)
 			if err != nil {
 				return err
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 29a92fd..3f627de 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -985,7 +985,7 @@ func (s *storageBtrfs) MigrationSource(c container) (MigrationStorageSourceDrive
 	return driver, nil
 }
 
-func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
 	if runningInUserns {
 		return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap)
 	}
@@ -1057,22 +1057,7 @@ func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots [
 	}
 
 	for _, snap := range snapshots {
-		// TODO: we need to propagate snapshot configurations
-		// as well. Right now the container configuration is
-		// done through the initial migration post. Should we
-		// post the snapshots and their configs as well, or do
-		// it some other way?
-		name := container.Name() + shared.SnapshotDelimiter + snap
-		args := containerArgs{
-			Ctype:        cTypeSnapshot,
-			Config:       container.LocalConfig(),
-			Profiles:     container.Profiles(),
-			Ephemeral:    container.IsEphemeral(),
-			Architecture: container.Architecture(),
-			Devices:      container.LocalDevices(),
-			Name:         name,
-		}
-
+		args := snapshotProtobufToContainerArgs(container.Name(), snap)
 		s, err := containerCreateEmptySnapshot(container.Daemon(), args)
 		if err != nil {
 			return err
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 8c3d33d..94eec94 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -282,6 +282,6 @@ func (s *storageDir) MigrationSource(container container) (MigrationStorageSourc
 	return rsyncMigrationSource(container)
 }
 
-func (s *storageDir) MigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+func (s *storageDir) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
 	return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap)
 }
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index de49050..e348370 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -972,6 +972,6 @@ func (s *storageLvm) MigrationSource(container container) (MigrationStorageSourc
 	return rsyncMigrationSource(container)
 }
 
-func (s *storageLvm) MigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+func (s *storageLvm) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
 	return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap)
 }
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index c6fd3e6..4b9fa0f 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -1399,7 +1399,7 @@ func (s *storageZfs) MigrationSource(ct container) (MigrationStorageSourceDriver
 	return &driver, nil
 }
 
-func (s *storageZfs) MigrationSink(live bool, container container, snapshots []string, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
+func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
 	zfsRecv := func(zfsName string) error {
 		zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName)
 		args := []string{"receive", "-F", "-u", zfsFsName}
@@ -1446,22 +1446,7 @@ func (s *storageZfs) MigrationSink(live bool, container container, snapshots []s
 	}
 
 	for _, snap := range snapshots {
-		// TODO: we need to propagate snapshot configurations
-		// as well. Right now the container configuration is
-		// done through the initial migration post. Should we
-		// post the snapshots and their configs as well, or do
-		// it some other way?
-		snapName := container.Name() + shared.SnapshotDelimiter + snap
-		args := containerArgs{
-			Ctype:        cTypeSnapshot,
-			Config:       container.LocalConfig(),
-			Profiles:     container.Profiles(),
-			Ephemeral:    container.IsEphemeral(),
-			Architecture: container.Architecture(),
-			Devices:      container.LocalDevices(),
-			Name:         snapName,
-		}
-
+		args := snapshotProtobufToContainerArgs(container.Name(), snap)
 		_, err := containerCreateEmptySnapshot(container.Daemon(), args)
 		if err != nil {
 			return err
diff --git a/test/suites/migration.sh b/test/suites/migration.sh
index da0c928..0c865f6 100644
--- a/test/suites/migration.sh
+++ b/test/suites/migration.sh
@@ -12,8 +12,12 @@ test_migration() {
 
   lxc_remote init testimage nonlive
   # test moving snapshots
+  lxc_remote config set l1:nonlive user.tester foo
   lxc_remote snapshot l1:nonlive
+  lxc_remote config unset l1:nonlive user.tester
   lxc_remote move l1:nonlive l2:
+  lxc_remote config show l2:nonlive/snap0 | grep user.tester | grep foo
+
   # FIXME: make this backend agnostic
   if [ "${LXD_BACKEND}" != "lvm" ]; then
     [ -d "${LXD2_DIR}/containers/nonlive/rootfs" ]

