[lxc-devel] [lxd/master] lxd/storage: Switch Delete to new logic
stgraber on Github
lxc-bot at linuxcontainers.org
Wed Nov 6 20:15:27 UTC 2019
A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 354 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20191106/6649ee1c/attachment.bin>
-------------- next part --------------
From 96bd4f0a41381a66910e34c2ac539196cc1c4192 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber at ubuntu.com>
Date: Wed, 6 Nov 2019 14:41:02 -0500
Subject: [PATCH] lxd/storage: Switch Delete to new logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Stéphane Graber <stgraber at ubuntu.com>
---
lxd/storage_pools.go | 88 +++++++++++++++++++++++++++++---------------
1 file changed, 59 insertions(+), 29 deletions(-)
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index cd9e33b2e7..ffc03e92c2 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -14,7 +14,8 @@ import (
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/response"
- driver "github.com/lxc/lxd/lxd/storage"
+ storagePools "github.com/lxc/lxd/lxd/storage"
+ storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
@@ -540,11 +541,12 @@ func storagePoolDelete(d *Daemon, r *http.Request) response.Response {
// Check if the pool is pending, if so we just need to delete it from
// the database.
- _, pool, err := d.cluster.StoragePoolGet(poolName)
+ _, dbPool, err := d.cluster.StoragePoolGet(poolName)
if err != nil {
return response.SmartError(err)
}
- if pool.Status == "Pending" {
+
+ if dbPool.Status == "Pending" {
_, err := d.cluster.StoragePoolDelete(poolName)
if err != nil {
return response.SmartError(err)
@@ -552,46 +554,74 @@ func storagePoolDelete(d *Daemon, r *http.Request) response.Response {
return response.EmptySyncResponse
}
- s, err := storagePoolInit(d.State(), poolName)
+ volumeNames, err := d.cluster.StoragePoolVolumesGetNames(poolID)
if err != nil {
return response.InternalError(err)
}
- // If this is a notification for a ceph pool deletion, we don't want to
- // actually delete the pool, since that will be done by the node that
- // notified us. We just need to delete the local mountpoint.
- if s, ok := s.(*storageCeph); ok && isClusterNotification(r) {
- // Delete the mountpoint for the storage pool.
- poolMntPoint := driver.GetStoragePoolMountPoint(s.pool.Name)
- if shared.PathExists(poolMntPoint) {
- err := os.RemoveAll(poolMntPoint)
- if err != nil {
- return response.SmartError(err)
- }
+ pool, err := storagePools.GetPoolByName(d.State(), poolName)
+ if err != storageDrivers.ErrUnknownDriver {
+ if err != nil {
+ return response.InternalError(err)
}
- return response.EmptySyncResponse
- }
- volumeNames, err := d.cluster.StoragePoolVolumesGetNames(poolID)
- if err != nil {
- return response.InternalError(err)
- }
+ // Only delete images if locally stored or running on initial member.
+ if !isClusterNotification(r) || !pool.Driver().Info().Remote {
+ for _, volume := range volumeNames {
+ _, imgInfo, err := d.cluster.ImageGet("default", volume, false, false)
+ if err != nil {
+ return response.InternalError(err)
+ }
- for _, volume := range volumeNames {
- _, imgInfo, err := d.cluster.ImageGet("default", volume, false, false)
+ err = doDeleteImageFromPool(d.State(), imgInfo.Fingerprint, poolName)
+ if err != nil {
+ return response.InternalError(err)
+ }
+ }
+ }
+
+ err = pool.Delete(isClusterNotification(r), nil)
if err != nil {
return response.InternalError(err)
}
-
- err = doDeleteImageFromPool(d.State(), imgInfo.Fingerprint, poolName)
+ } else {
+ s, err := storagePoolInit(d.State(), poolName)
if err != nil {
return response.InternalError(err)
}
- }
- err = s.StoragePoolDelete()
- if err != nil {
- return response.InternalError(err)
+ // If this is a notification for a ceph pool deletion, we don't want to
+ // actually delete the pool, since that will be done by the node that
+ // notified us. We just need to delete the local mountpoint.
+ if s, ok := s.(*storageCeph); ok && isClusterNotification(r) {
+ // Delete the mountpoint for the storage pool.
+ poolMntPoint := storagePools.GetStoragePoolMountPoint(s.pool.Name)
+ if shared.PathExists(poolMntPoint) {
+ err := os.RemoveAll(poolMntPoint)
+ if err != nil {
+ return response.SmartError(err)
+ }
+ }
+
+ return response.EmptySyncResponse
+ }
+
+ for _, volume := range volumeNames {
+ _, imgInfo, err := d.cluster.ImageGet("default", volume, false, false)
+ if err != nil {
+ return response.InternalError(err)
+ }
+
+ err = doDeleteImageFromPool(d.State(), imgInfo.Fingerprint, poolName)
+ if err != nil {
+ return response.InternalError(err)
+ }
+ }
+
+ err = s.StoragePoolDelete()
+ if err != nil {
+ return response.InternalError(err)
+ }
}
// If this is a cluster notification, we're done, any database work
More information about the lxc-devel
mailing list