[lxc-devel] [lxd/master] Fix bad start/stop of storage in snapshot codepaths

stgraber on Github lxc-bot at linuxcontainers.org
Tue Sep 25 21:38:26 UTC 2018


From eb5c617dd137524f6db091979519c28637b79620 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Tue, 25 Sep 2018 17:22:12 -0400
Subject: [PATCH 1/2] lxd/storage/lvm: Don't unnecessarily start/stop storage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/storage_lvm.go | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 9d2b0d0634..b08ba31cd4 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -1399,14 +1399,6 @@ func (s *storageLvm) ContainerRename(container container, newContainerName strin
 func (s *storageLvm) ContainerRestore(target container, source container) error {
 	logger.Debugf("Restoring LVM storage volume for container \"%s\" from %s to %s", s.volume.Name, source.Name(), target.Name())
 
-	ourStart, err := source.StorageStart()
-	if err != nil {
-		return err
-	}
-	if ourStart {
-		defer source.StorageStop()
-	}
-
 	_, sourcePool, _ := source.Storage().GetContainerPoolInfo()
 	if s.pool.Name != sourcePool {
 		return fmt.Errorf("containers must be on the same pool to be restored")
@@ -1419,10 +1411,13 @@ func (s *storageLvm) ContainerRestore(target container, source container) error
 	targetLvmName := containerNameToLVName(targetName)
 	targetPath := target.Path()
 	if s.useThinpool {
-		_, err = target.Storage().ContainerUmount(targetName, targetPath)
+		ourUmount, err := target.Storage().ContainerUmount(targetName, targetPath)
 		if err != nil {
 			return err
 		}
+		if ourUmount {
+			defer target.Storage().ContainerMount(target)
+		}
 
 		poolName := s.getOnDiskPoolName()
 
@@ -1439,18 +1434,21 @@ func (s *storageLvm) ContainerRestore(target container, source container) error
 		if err != nil {
 			return fmt.Errorf("Error creating snapshot LV: %v", err)
 		}
-
-		_, err = target.Storage().ContainerMount(target)
+	} else {
+		ourStart, err := source.StorageStart()
 		if err != nil {
 			return err
 		}
-	} else {
-		ourMount, err := target.Storage().ContainerMount(target)
+		if ourStart {
+			defer source.StorageStop()
+		}
+
+		ourStart, err = target.StorageStart()
 		if err != nil {
 			return err
 		}
-		if ourMount {
-			defer target.Storage().ContainerUmount(targetName, targetPath)
+		if ourStart {
+			defer target.StorageStop()
 		}
 
 		poolName := s.getOnDiskPoolName()
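
For readers following along, here is a minimal sketch of the guard pattern these hunks rely on. The volume type and its methods are simplified, hypothetical stand-ins for LXD's container storage helpers (which return (bool, error), the bool reporting whether this call actually performed the start/mount), not the real types:

// Minimal sketch (not LXD code) of the conditional start/stop pattern:
// only the caller that actually started the storage schedules the
// matching stop.
package main

import "fmt"

// volume is a hypothetical stand-in for a container's storage volume.
type volume struct {
	started bool
}

// StorageStart mirrors the (bool, error) shape of LXD's helpers: the
// bool reports whether this call performed the start (true) or the
// volume was already running (false).
func (v *volume) StorageStart() (bool, error) {
	if v.started {
		return false, nil
	}
	v.started = true
	return true, nil
}

func (v *volume) StorageStop() (bool, error) {
	if !v.started {
		return false, nil
	}
	v.started = false
	return true, nil
}

// restore only reverses the operations it performed itself, so a volume
// that was already started stays started after the restore.
func restore(source *volume) error {
	ourStart, err := source.StorageStart()
	if err != nil {
		return err
	}
	if ourStart {
		defer source.StorageStop()
	}

	// ... actual restore work would happen here ...
	return nil
}

func main() {
	src := &volume{started: true} // already running, must stay running
	if err := restore(src); err != nil {
		fmt.Println("restore failed:", err)
	}
	fmt.Println("source still started:", src.started) // true
}

The point of the patch is simply to apply this guard only on the codepaths that need it: the thinpool branch now pairs its own ContainerUmount with a deferred remount, while the non-thinpool branch owns the StorageStart/StorageStop pair.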

From 2ee7778f4a6d19a744dcf03ef67c8c2350c4be74 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Tue, 25 Sep 2018 17:24:07 -0400
Subject: [PATCH 2/2] lxd/storage/ceph: Don't unnecessarily mount snapshots
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
 lxd/storage_ceph.go | 49 ++++++++++++++++++++++++---------------------
 1 file changed, 26 insertions(+), 23 deletions(-)

diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 6e1afd60eb..7170cd6d95 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -1403,6 +1403,7 @@ func (s *storageCeph) ContainerUmount(name string, path string) (bool, error) {
 		if _, ok := <-waitChannel; ok {
 			logger.Warnf("Received value over semaphore, this should not have happened")
 		}
+
 		// Give the benefit of the doubt and assume that the other
 		// thread actually succeeded in unmounting the storage volume.
 		logger.Debugf("RBD storage volume for container \"%s\" on storage pool \"%s\" appears to be already unmounted", s.volume.Name, s.pool.Name)
@@ -1593,19 +1594,19 @@ func (s *storageCeph) ContainerRestore(target container, source container) error
 
 	logger.Debugf(`Restoring RBD storage volume for container "%s" from %s to %s`, targetName, sourceName, targetName)
 
-	ourStorageStop, err := source.StorageStop()
+	ourStop, err := source.StorageStop()
 	if err != nil {
 		return err
 	}
-	if ourStorageStop {
+	if ourStop {
 		defer source.StorageStart()
 	}
 
-	ourStorageStop, err = target.StorageStop()
+	ourStop, err = target.StorageStop()
 	if err != nil {
 		return err
 	}
-	if ourStorageStop {
+	if ourStop {
 		defer target.StorageStart()
 	}
 
@@ -1861,38 +1862,40 @@ func (s *storageCeph) ContainerSnapshotStart(c container) (bool, error) {
 }
 
 func (s *storageCeph) ContainerSnapshotStop(c container) (bool, error) {
-	containerName := c.Name()
-	logger.Debugf(`Stopping RBD storage volume for snapshot "%s" on storage pool "%s"`, containerName, s.pool.Name)
+	logger.Debugf(`Stopping RBD storage volume for snapshot "%s" on storage pool "%s"`, c.Name(), s.pool.Name)
 
+	containerName := c.Name()
 	containerMntPoint := getSnapshotMountPoint(s.pool.Name, containerName)
-	if shared.IsMountPoint(containerMntPoint) {
-		err := tryUnmount(containerMntPoint, syscall.MNT_DETACH)
-		if err != nil {
-			logger.Errorf("Failed to unmount %s: %s", containerMntPoint,
-				err)
-			return false, err
-		}
-		logger.Debugf("Unmounted %s", containerMntPoint)
+
+	// Check if already unmounted
+	if !shared.IsMountPoint(containerMntPoint) {
+		return false, nil
+	}
+
+	// Unmount
+	err := tryUnmount(containerMntPoint, syscall.MNT_DETACH)
+	if err != nil {
+		logger.Errorf("Failed to unmount %s: %s", containerMntPoint, err)
+		return false, err
 	}
 
+	logger.Debugf("Unmounted %s", containerMntPoint)
+
 	containerOnlyName, snapOnlyName, _ := containerGetParentAndSnapshotName(containerName)
 	cloneName := fmt.Sprintf("%s_%s_start_clone", containerOnlyName, snapOnlyName)
-	// unmap
-	err := cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, cloneName,
-		"snapshots", s.UserName, true)
+
+	// Unmap the RBD volume
+	err = cephRBDVolumeUnmap(s.ClusterName, s.OSDPoolName, cloneName, "snapshots", s.UserName, true)
 	if err != nil {
 		logger.Warnf(`Failed to unmap RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
 	} else {
 		logger.Debugf(`Unmapped RBD storage volume for container "%s" on storage pool "%s"`, containerName, s.pool.Name)
 	}
 
-	rbdVolumeExists := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName,
-		cloneName, "snapshots", s.UserName)
-
+	rbdVolumeExists := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, cloneName, "snapshots", s.UserName)
 	if rbdVolumeExists {
-		// delete
-		err = cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName, cloneName,
-			"snapshots", s.UserName)
+		// Delete the temporary RBD volume
+		err = cephRBDVolumeDelete(s.ClusterName, s.OSDPoolName, cloneName, "snapshots", s.UserName)
 		if err != nil {
 			logger.Errorf(`Failed to delete clone of RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
 			return false, err
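
The same (bool, error) ownership convention shapes the ContainerSnapshotStop rewrite: return early when nothing is mounted, otherwise unmount and clean up the temporary clone. Roughly, and using hypothetical stand-ins for shared.IsMountPoint, tryUnmount and the ceph RBD helpers rather than the real functions:

// Minimal sketch (not LXD code) of the restructured snapshot-stop flow.
package main

import (
	"fmt"
	"log"
)

func isMountPoint(path string) bool   { return true } // stand-in
func unmountDetach(path string) error { return nil }  // stand-in
func deleteClone(name string) error   { return nil }  // stand-in

// snapshotStop reports whether this call performed the unmount, matching
// the (bool, error) convention of the other storage helpers.
func snapshotStop(mntPoint, cloneName string) (bool, error) {
	// Check if already unmounted
	if !isMountPoint(mntPoint) {
		return false, nil
	}

	// Unmount the snapshot mountpoint
	if err := unmountDetach(mntPoint); err != nil {
		return false, err
	}

	// Delete the temporary clone that backed the mounted snapshot
	if err := deleteClone(cloneName); err != nil {
		return false, err
	}

	return true, nil
}

func main() {
	ourStop, err := snapshotStop("/example/snapshots/c1/snap0", "c1_snap0_start_clone")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("we performed the unmount:", ourStop)
}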

