[lxc-devel] [lxd/master] Stricter node remove checks

freeekanayaka on Github lxc-bot at linuxcontainers.org
Wed Feb 27 11:27:54 UTC 2019


From 77b114a675a3619062803a7d99bb8721a2f00660 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 27 Feb 2019 12:07:13 +0100
Subject: [PATCH 1/4] Use capital case in error messages returned by
 db.ClusterTx.NodeIsEmpty()

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node.go      | 10 +++++-----
 lxd/db/node_test.go |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/lxd/db/node.go b/lxd/db/node.go
index c28c5ad199..769043411e 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -328,15 +328,15 @@ func (c *ClusterTx) NodeIsEmpty(id int64) (string, error) {
 	// Check if the node has any containers.
 	containers, err := query.SelectStrings(c.tx, "SELECT name FROM containers WHERE node_id=?", id)
 	if err != nil {
-		return "", errors.Wrapf(err, "failed to get containers for node %d", id)
+		return "", errors.Wrapf(err, "Failed to get containers for node %d", id)
 	}
 	if len(containers) > 0 {
 		message := fmt.Sprintf(
-			"node still has the following containers: %s", strings.Join(containers, ", "))
+			"Node still has the following containers: %s", strings.Join(containers, ", "))
 		return message, nil
 	}
 
-	// Check if the node has any images available only in.
+	// Check if the node has any images available only on it.
 	images := []struct {
 		fingerprint string
 		nodeID      int64
@@ -357,7 +357,7 @@ SELECT fingerprint, node_id FROM images JOIN images_nodes ON images.id=images_no
 	defer stmt.Close()
 	err = query.SelectObjects(stmt, dest)
 	if err != nil {
-		return "", errors.Wrapf(err, "failed to get image list for node %d", id)
+		return "", errors.Wrapf(err, "Failed to get image list for node %d", id)
 	}
 	index := map[string][]int64{} // Map fingerprints to IDs of nodes
 	for _, image := range images {
@@ -376,7 +376,7 @@ SELECT fingerprint, node_id FROM images JOIN images_nodes ON images.id=images_no
 
 	if len(fingerprints) > 0 {
 		message := fmt.Sprintf(
-			"node still has the following images: %s", strings.Join(fingerprints, ", "))
+			"Node still has the following images: %s", strings.Join(fingerprints, ", "))
 		return message, nil
 	}
 
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index e1c8a66378..720865f173 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -232,7 +232,7 @@ INSERT INTO containers (id, node_id, name, architecture, type, project_id) VALUE
 
 	message, err = tx.NodeIsEmpty(id)
 	require.NoError(t, err)
-	assert.Equal(t, "node still has the following containers: foo", message)
+	assert.Equal(t, "Node still has the following containers: foo", message)
 
 	err = tx.NodeClear(id)
 	require.NoError(t, err)
@@ -262,7 +262,7 @@ INSERT INTO images_nodes(image_id, node_id) VALUES(1, ?)`, id)
 
 	message, err := tx.NodeIsEmpty(id)
 	require.NoError(t, err)
-	assert.Equal(t, "node still has the following images: abc", message)
+	assert.Equal(t, "Node still has the following images: abc", message)
 
 	// Insert a new image entry for node 1 (the default node).
 	_, err = tx.Tx().Exec(`
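
For context, here is a minimal standalone sketch (not LXD code) of how a message wrapped
with github.com/pkg/errors surfaces to the caller: the outer message comes first, with the
underlying cause appended after a colon, so it is the part a user reads first and the part
the capitalization convention presumably targets.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	cause := fmt.Errorf("no such table: containers") // stand-in for a query error
	err := errors.Wrapf(cause, "Failed to get containers for node %d", 3)

	// Prints: Failed to get containers for node 3: no such table: containers
	fmt.Println(err)
}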

From 16f2c51741e1452dc5a94f8eaa0cbe796636ed79 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 27 Feb 2019 12:07:54 +0100
Subject: [PATCH 2/4] db.ClusterTx.NodeIsEmpty(): a node with custom volumes is not
 empty

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node.go      | 13 +++++++++++++
 lxd/db/node_test.go | 22 ++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/lxd/db/node.go b/lxd/db/node.go
index 769043411e..cc318177dc 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -380,6 +380,19 @@ SELECT fingerprint, node_id FROM images JOIN images_nodes ON images.id=images_no
 		return message, nil
 	}
 
+	// Check if the node has any custom volumes.
+	volumes, err := query.SelectStrings(
+		c.tx, "SELECT name FROM storage_volumes WHERE node_id=? AND type=?",
+		id, StoragePoolVolumeTypeCustom)
+	if err != nil {
+		return "", errors.Wrapf(err, "Failed to get custom volumes for node %d", id)
+	}
+	if len(volumes) > 0 {
+		message := fmt.Sprintf(
+			"Node still has the following custom volumes: %s", strings.Join(volumes, ", "))
+		return message, nil
+	}
+
 	return "", nil
 }
 
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 720865f173..4094c2cba7 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -274,6 +274,28 @@ INSERT INTO images_nodes(image_id, node_id) VALUES(1, 1)`)
 	assert.Equal(t, "", message)
 }
 
+// A node is considered empty only if it has no custom volumes on it.
+func TestNodeIsEmpty_CustomVolumes(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	id, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	_, err = tx.Tx().Exec(`
+INSERT INTO storage_pools (id, name, driver) VALUES (1, 'local', 'zfs')`)
+	require.NoError(t, err)
+
+	_, err = tx.Tx().Exec(`
+INSERT INTO storage_volumes(name, storage_pool_id, node_id, type, project_id)
+  VALUES ('data', 1, ?, ?, 1)`, id, db.StoragePoolVolumeTypeCustom)
+	require.NoError(t, err)
+
+	message, err := tx.NodeIsEmpty(id)
+	require.NoError(t, err)
+	assert.Equal(t, "Node still has the following custom volumes: data", message)
+}
+
 // If there are 2 online nodes, return the address of the one with the least
 // number of containers.
 func TestNodeWithLeastContainers(t *testing.T) {
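
To illustrate how the extended check is meant to be consumed, here is a hypothetical caller
sketch; checkNodeRemovable, nodeEmptyChecker and fakeTx are illustrative names, not part of
the LXD API, and only the NodeIsEmpty signature is taken from the patch above.

package main

import "fmt"

// nodeEmptyChecker matches the (string, error) shape of ClusterTx.NodeIsEmpty.
type nodeEmptyChecker interface {
	NodeIsEmpty(id int64) (string, error)
}

// checkNodeRemovable refuses removal when the node still holds containers,
// node-local images or (with this patch) custom volumes.
func checkNodeRemovable(c nodeEmptyChecker, nodeID int64) error {
	message, err := c.NodeIsEmpty(nodeID)
	if err != nil {
		return err
	}
	if message != "" {
		return fmt.Errorf("%s", message)
	}
	return nil
}

// fakeTx simulates a node that still owns a custom volume.
type fakeTx struct{}

func (fakeTx) NodeIsEmpty(id int64) (string, error) {
	return "Node still has the following custom volumes: data", nil
}

func main() {
	fmt.Println(checkNodeRemovable(fakeTx{}, 3))
	// Output: Node still has the following custom volumes: data
}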

From a241c418ca6142d7baf363ddea30bc4b23204ee1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 27 Feb 2019 12:08:48 +0100
Subject: [PATCH 3/4] Add integration test checking that nodes with custom
 volumes can't be removed

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/suites/clustering.sh | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index c33e416848..2afb455ebb 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -145,6 +145,11 @@ test_clustering_membership() {
   ! LXD_DIR="${LXD_FOUR_DIR}" lxc cluster remove node3 || false
   LXD_DIR="${LXD_TWO_DIR}" lxc image delete testimage
 
+  # Trying to delete a node which has a custom volume on it results in an error.
+  LXD_DIR="${LXD_FOUR_DIR}" lxc storage volume create data v1
+  ! LXD_DIR="${LXD_FOUR_DIR}" lxc cluster remove node3 || false
+  LXD_DIR="${LXD_FOUR_DIR}" lxc storage volume delete data v1
+
   # The image got deleted from the LXD_DIR tree.
   # shellcheck disable=2086
   [ "$(ls ${LXD_FOUR_DIR}/images)" = "" ] || false

From 21982c80fa98b7fc15aa61462a9fd43cb2a088d9 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 27 Feb 2019 12:25:54 +0100
Subject: [PATCH 4/4] Prompt for confirmation when using --force to remove a
 server

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/cluster.go            | 34 +++++++++++++++++++++++++++++++++-
 test/suites/clustering.sh |  4 ++--
 2 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/lxc/cluster.go b/lxc/cluster.go
index afc13e6274..b47b232ac2 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"bufio"
 	"fmt"
 	"os"
 	"sort"
@@ -10,6 +11,7 @@ import (
 	"github.com/spf13/cobra"
 	yaml "gopkg.in/yaml.v2"
 
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	cli "github.com/lxc/lxd/shared/cmd"
 	"github.com/lxc/lxd/shared/i18n"
@@ -235,7 +237,8 @@ type cmdClusterRemove struct {
 	global  *cmdGlobal
 	cluster *cmdCluster
 
-	flagForce bool
+	flagForce          bool
+	flagNonInteractive bool
 }
 
 func (c *cmdClusterRemove) Command() *cobra.Command {
@@ -248,10 +251,31 @@ func (c *cmdClusterRemove) Command() *cobra.Command {
 
 	cmd.RunE = c.Run
 	cmd.Flags().BoolVarP(&c.flagForce, "force", "f", false, i18n.G("Force removing a member, even if degraded"))
+	cmd.Flags().BoolVarP(&c.flagNonInteractive, "quiet", "q", false, i18n.G("Don't require user confirmation for using --force"))
 
 	return cmd
 }
 
+func (c *cmdClusterRemove) promptConfirmation(name string) error {
+	reader := bufio.NewReader(os.Stdin)
+	fmt.Printf(i18n.G(`After a server is removed from the cluster, it will no longer be
+able to manage the containers, images or custom volumes it might currently
+have on it.
+
+The --force flag should only be used if the server has died and is not
+expected to come back.
+
+Are you really sure you want to force removing %s? (yes/no): `), name)
+	input, _ := reader.ReadString('\n')
+	input = strings.TrimSuffix(input, "\n")
+
+	if !shared.StringInSlice(strings.ToLower(input), []string{i18n.G("yes")}) {
+		return fmt.Errorf(i18n.G("User aborted delete operation"))
+	}
+
+	return nil
+}
+
 func (c *cmdClusterRemove) Run(cmd *cobra.Command, args []string) error {
 	// Sanity checks
 	exit, err := c.global.CheckArgs(cmd, args, 1, 1)
@@ -267,6 +291,14 @@ func (c *cmdClusterRemove) Run(cmd *cobra.Command, args []string) error {
 
 	resource := resources[0]
 
+	// Prompt for confirmation if --force is used.
+	if !c.flagNonInteractive && c.flagForce {
+		err := c.promptConfirmation(resource.name)
+		if err != nil {
+			return err
+		}
+	}
+
 	// Delete the cluster member
 	err = resource.server.DeleteClusterMember(resource.name, c.flagForce)
 	if err != nil {
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 2afb455ebb..bee2480296 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -125,7 +125,7 @@ test_clustering_membership() {
   ! LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}" || false
 
   # Force the removal of the degraded node.
-  LXD_DIR="${LXD_TWO_DIR}" lxc cluster remove node3 --force
+  LXD_DIR="${LXD_TWO_DIR}" lxc cluster remove node3 -q --force
 
   # Sleep a bit to let a heartbeat occur and update the list of raft nodes
   # everywhere, showing that node 4 has been promoted to database node.
@@ -507,7 +507,7 @@ test_clustering_storage() {
     LXD_DIR="${LXD_ONE_DIR}" lxc stop bar --force
 
     LXD_DIR="${LXD_ONE_DIR}" lxc config set cluster.offline_threshold 20
-    LXD_DIR="${LXD_ONE_DIR}" lxc cluster remove node3 --force
+    LXD_DIR="${LXD_ONE_DIR}" lxc cluster remove node3 -q --force
 
     LXD_DIR="${LXD_ONE_DIR}" lxc delete bar
 

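As a usage note, the confirmation added in the last patch can be exercised on its own; the
following is a standalone reduction of the same pattern (no i18n, no cobra wiring), with
names that are illustrative rather than the LXD ones. Typing anything other than "yes"
aborts, which is why the integration tests above pass -q to skip the prompt entirely.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// confirmForceRemoval mirrors the shape of promptConfirmation above:
// read one line from stdin and only proceed on an explicit "yes".
func confirmForceRemoval(name string) error {
	reader := bufio.NewReader(os.Stdin)
	fmt.Printf("Are you really sure you want to force removing %s? (yes/no): ", name)

	input, _ := reader.ReadString('\n')
	if strings.ToLower(strings.TrimSpace(input)) != "yes" {
		return fmt.Errorf("User aborted delete operation")
	}
	return nil
}

func main() {
	if err := confirmForceRemoval("node3"); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Println("Proceeding with forced removal")
}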
