[lxc-devel] [lxd/master] migration: fix stateful restore

brauner on Github lxc-bot at linuxcontainers.org
Wed Mar 22 12:18:47 UTC 2017


From 3815b28ab13f271368bad6e7f560fbbb2465f89a Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brauner at ubuntu.com>
Date: Wed, 22 Mar 2017 13:16:01 +0100
Subject: [PATCH] migration: fix stateful restore

With the new storage API work, {m,um}ounting containers is done on demand,
so we need to ensure that storage is available at the right time. In the
restore case, a running container is stopped first, which also unmounts the
storage volume for e.g. the ZFS backend. So make sure that the volume is
mounted again right afterwards.

Signed-off-by: Christian Brauner <christian.brauner at ubuntu.com>
---
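The hunks below rely on the on-demand mount pattern around
StorageStart()/StorageStop(): mount the volume if needed, and only unmount
it again if this caller was the one that mounted it. A minimal,
self-contained sketch of that pattern (the container type and method
bodies here are simplified placeholders, not the real LXD implementation;
only the StorageStart/StorageStop calling convention mirrors the patch):

// Sketch of the on-demand mount pattern used in the hunks below.
package main

import "fmt"

type container struct {
	mounted bool
}

// StorageStart mounts the container's storage volume if it is not mounted
// yet. The bool return tells the caller whether this call performed the
// mount, i.e. whether it owns the matching StorageStop.
func (c *container) StorageStart() (bool, error) {
	if c.mounted {
		return false, nil
	}
	c.mounted = true
	return true, nil
}

// StorageStop unmounts the container's storage volume.
func (c *container) StorageStop() error {
	c.mounted = false
	return nil
}

func restore(c *container) error {
	ourStart, err := c.StorageStart()
	if err != nil {
		return err
	}
	// Only tear down a mount we created ourselves; if the volume was
	// already mounted by another code path, leave it alone.
	if ourStart {
		defer c.StorageStop()
	}

	// ... work that needs the rootfs mounted (state path checks etc.) ...
	fmt.Println("storage mounted during restore:", c.mounted)
	return nil
}

func main() {
	if err := restore(&container{}); err != nil {
		fmt.Println("restore failed:", err)
	}
}

The bool returned by StorageStart() is what keeps nested callers safe:
each caller only balances the mount it performed itself.
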
 lxd/container_lxc.go | 27 +++++++++++++++++++++++++--
 lxd/container_put.go |  3 ++-
 lxd/storage_zfs.go   | 30 ++++++++++++++++++++++++------
 3 files changed, 51 insertions(+), 9 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index df1c89c..5267c1a 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -2561,6 +2561,14 @@ func (c *containerLXC) Restore(sourceContainer container) error {
 		return err
 	}
 
+	ourStart, err := c.StorageStart()
+	if err != nil {
+		return err
+	}
+	if ourStart {
+		defer c.StorageStop()
+	}
+
 	// Check if we can restore the container
 	err = c.storage.ContainerCanRestore(c, sourceContainer)
 	if err != nil {
@@ -2581,9 +2589,21 @@ func (c *containerLXC) Restore(sourceContainer container) error {
 	wasRunning := false
 	if c.IsRunning() {
 		wasRunning = true
-		if err := c.Stop(false); err != nil {
+
+		// This will unmount the container storage.
+		err := c.Stop(false)
+		if err != nil {
 			return err
 		}
+
+		// Ensure that storage is mounted for state path checks.
+		ourStart, err := c.StorageStart()
+		if err != nil {
+			return err
+		}
+		if ourStart {
+			defer c.StorageStop()
+		}
 	}
 
 	ctxMap = log.Ctx{"name": c.name,
@@ -2627,7 +2647,9 @@ func (c *containerLXC) Restore(sourceContainer container) error {
 	// If the container wasn't running but was stateful, should we restore
 	// it as running?
 	if shared.PathExists(c.StatePath()) {
-		if err := c.Migrate(lxc.MIGRATE_RESTORE, c.StatePath(), "snapshot", false, false); err != nil {
+		shared.LogDebug("Performing stateful restore", ctxMap)
+		err := c.Migrate(lxc.MIGRATE_RESTORE, c.StatePath(), "snapshot", false, false)
+		if err != nil {
 			return err
 		}
 
@@ -2643,6 +2665,7 @@ func (c *containerLXC) Restore(sourceContainer container) error {
 			return err
 		}
 
+		shared.LogDebug("Performed stateful restore", ctxMap)
 		shared.LogInfo("Restored container", ctxMap)
 		return nil
 	}
diff --git a/lxd/container_put.go b/lxd/container_put.go
index 135a41b..ec9c5c5 100644
--- a/lxd/container_put.go
+++ b/lxd/container_put.go
@@ -114,7 +114,8 @@ func containerSnapRestore(d *Daemon, name string, snap string) error {
 		}
 	}
 
-	if err := c.Restore(source); err != nil {
+	err = c.Restore(source)
+	if err != nil {
 		return err
 	}
 
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index d5b8df6..25bffad 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -908,17 +908,35 @@ func (s *storageZfs) ContainerRename(container container, newName string) error
 	return nil
 }
 
-func (s *storageZfs) ContainerRestore(container container, sourceContainer container) error {
-	shared.LogDebugf("Restoring ZFS storage volume for container \"%s\" from %s -> %s.", s.volume.Name, sourceContainer.Name(), container.Name())
+func (s *storageZfs) ContainerRestore(target container, source container) error {
+	shared.LogDebugf("Restoring ZFS storage volume for container \"%s\" from %s -> %s.", s.volume.Name, source.Name(), target.Name())
+
+	// Start storage for source container
+	ourSourceStart, err := source.StorageStart()
+	if err != nil {
+		return err
+	}
+	if ourSourceStart {
+		defer source.StorageStop()
+	}
+
+	// Start storage for target container
+	ourTargetStart, err := target.StorageStart()
+	if err != nil {
+		return err
+	}
+	if ourTargetStart {
+		defer target.StorageStop()
+	}
 
 	// Remove any needed snapshot
-	snaps, err := container.Snapshots()
+	snaps, err := target.Snapshots()
 	if err != nil {
 		return err
 	}
 
 	for i := len(snaps) - 1; i != 0; i-- {
-		if snaps[i].Name() == sourceContainer.Name() {
+		if snaps[i].Name() == source.Name() {
 			break
 		}
 
@@ -929,7 +947,7 @@ func (s *storageZfs) ContainerRestore(container container, sourceContainer conta
 	}
 
 	// Restore the snapshot
-	fields := strings.SplitN(sourceContainer.Name(), shared.SnapshotDelimiter, 2)
+	fields := strings.SplitN(source.Name(), shared.SnapshotDelimiter, 2)
 	cName := fields[0]
 	snapName := fmt.Sprintf("snapshot-%s", fields[1])
 
@@ -938,7 +956,7 @@ func (s *storageZfs) ContainerRestore(container container, sourceContainer conta
 		return err
 	}
 
-	shared.LogDebugf("Restored ZFS storage volume for container \"%s\" from %s -> %s.", s.volume.Name, sourceContainer.Name(), container.Name())
+	shared.LogDebugf("Restored ZFS storage volume for container \"%s\" from %s -> %s.", s.volume.Name, source.Name(), target.Name())
 	return nil
 }
 

