[lxc-devel] [lxd/master] This should have been a patch, for easier backporting

freeekanayaka on Github lxc-bot at linuxcontainers.org
Tue Jul 24 17:32:03 UTC 2018


A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 361 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20180724/839774f3/attachment.bin>
-------------- next part --------------
From b961c740d3f31adbbc4e0e404f2a2b6ae2aebd3a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 24 Jul 2018 17:29:55 +0000
Subject: [PATCH] This should have been a patch, for easier backporting

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/update.go      | 50 ++------------------------------
 lxd/db/cluster/update_test.go | 56 ------------------------------------
 lxd/patches.go                | 67 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+), 104 deletions(-)

diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index d53e59e1b..5d09b8280 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -44,54 +44,8 @@ var updates = map[int]schema.Update{
 // The lvm.thinpool_name and lvm.vg_name config keys are node-specific and need
 // to be linked to nodes.
 func updateFromV8(tx *sql.Tx) error {
-	// Fetch the IDs of all existing nodes.
-	nodeIDs, err := query.SelectIntegers(tx, "SELECT id FROM nodes")
-	if err != nil {
-		return errors.Wrap(err, "failed to get IDs of current nodes")
-	}
-
-	// Fetch the IDs of all existing lvm pools.
-	poolIDs, err := query.SelectIntegers(tx, "SELECT id FROM storage_pools WHERE driver='lvm'")
-	if err != nil {
-		return errors.Wrap(err, "failed to get IDs of current lvm pools")
-	}
-
-	for _, poolID := range poolIDs {
-		// Fetch the config for this lvm pool and check if it has the
-		// lvn.thinpool_name or lvm.vg_name keys.
-		config, err := query.SelectConfig(
-			tx, "storage_pools_config", "storage_pool_id=? AND node_id IS NULL", poolID)
-		if err != nil {
-			return errors.Wrap(err, "failed to fetch of lvm pool config")
-		}
-
-		for _, key := range []string{"lvm.thinpool_name", "lvm.vg_name"} {
-			value, ok := config[key]
-			if !ok {
-				continue
-			}
-
-			// Delete the current key
-			_, err = tx.Exec(`
-DELETE FROM storage_pools_config WHERE key=? AND storage_pool_id=? AND node_id IS NULL
-`, key, poolID)
-			if err != nil {
-				return errors.Wrapf(err, "failed to delete %s config", key)
-			}
-
-			// Add the config entry for each node
-			for _, nodeID := range nodeIDs {
-				_, err := tx.Exec(`
-INSERT INTO storage_pools_config(storage_pool_id, node_id, key, value)
-  VALUES(?, ?, ?, ?)
-`, poolID, nodeID, key, value)
-				if err != nil {
-					return errors.Wrapf(err, "failed to create %s node config", key)
-				}
-			}
-		}
-	}
-
+	// Moved to patchLvmNodeSpecificConfigKeys, since there's no schema
+	// change. That makes it easier to backport.
 	return nil
 }
 
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index 1c9be1adb..cc8717b3e 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -346,59 +346,3 @@ INSERT INTO storage_pools_config(storage_pool_id, node_id, key, value)
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"zfs.clone_copy": "true"}, config)
 }
-
-func TestUpdateFromV8(t *testing.T) {
-	schema := cluster.Schema()
-	db, err := schema.ExerciseUpdate(9, func(db *sql.DB) {
-		// Create two nodes.
-		_, err := db.Exec(
-			"INSERT INTO nodes VALUES (1, 'n1', '', '1.2.3.4:666', 1, 32, ?, 0)",
-			time.Now())
-		require.NoError(t, err)
-		_, err = db.Exec(
-			"INSERT INTO nodes VALUES (2, 'n2', '', '5.6.7.8:666', 1, 32, ?, 0)",
-			time.Now())
-		require.NoError(t, err)
-
-		// Create a pool p1 of type lvm.
-		_, err = db.Exec("INSERT INTO storage_pools VALUES (1, 'p1', 'lvm', '', 0)")
-		require.NoError(t, err)
-
-		// Create a pool p2 of type lvm.
-		_, err = db.Exec("INSERT INTO storage_pools VALUES (2, 'p2', 'lvm', '', 0)")
-		require.NoError(t, err)
-
-		// Create a lvm.thinpool_name config for p1.
-		_, err = db.Exec(`
-INSERT INTO storage_pools_config(storage_pool_id, node_id, key, value)
-  VALUES(1, NULL, 'lvm.thinpool_name', 'my-pool')
-`)
-		require.NoError(t, err)
-
-		// Create a rsync.bwlimit config for p2.
-		_, err = db.Exec(`
-INSERT INTO storage_pools_config(storage_pool_id, node_id, key, value)
-  VALUES(2, NULL, 'rsync.bwlimit', '64')
-`)
-		require.NoError(t, err)
-	})
-	require.NoError(t, err)
-
-	tx, err := db.Begin()
-	require.NoError(t, err)
-	defer tx.Rollback()
-
-	// Check the lvm.thinpool_name config is now node-specific.
-	for _, nodeID := range []int{1, 2} {
-		config, err := query.SelectConfig(
-			tx, "storage_pools_config", "storage_pool_id=1 AND node_id=?", nodeID)
-		require.NoError(t, err)
-		assert.Equal(t, map[string]string{"lvm.thinpool_name": "my-pool"}, config)
-	}
-
-	// Check the rsync.bwlimit key is still global
-	config, err := query.SelectConfig(
-		tx, "storage_pools_config", "storage_pool_id=2 AND node_id IS NULL")
-	require.NoError(t, err)
-	assert.Equal(t, map[string]string{"rsync.bwlimit": "64"}, config)
-}
diff --git a/lxd/patches.go b/lxd/patches.go
index 74af6895d..85dbaad43 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -10,8 +10,10 @@ import (
 
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
+	"github.com/pkg/errors"
 
 	log "github.com/lxc/lxd/shared/log15"
 )
@@ -55,6 +57,7 @@ var patches = []patch{
 	{name: "devices_new_naming_scheme", run: patchDevicesNewNamingScheme},
 	{name: "storage_api_permissions", run: patchStorageApiPermissions},
 	{name: "container_config_regen", run: patchContainerConfigRegen},
+	{name: "lvm_node_specific_config_keys", run: patchLvmNodeSpecificConfigKeys},
 }
 
 type patch struct {
@@ -1940,6 +1943,70 @@ func patchContainerConfigRegen(name string, d *Daemon) error {
 	return nil
 }
 
+// The lvm.thinpool_name and lvm.vg_name config keys are node-specific and need
+// to be linked to nodes.
+func patchLvmNodeSpecificConfigKeys(name string, d *Daemon) error {
+	tx, err := d.cluster.Begin()
+	if err != nil {
+		return errors.Wrap(err, "failed to begin transaction")
+	}
+
+	// Fetch the IDs of all existing nodes.
+	nodeIDs, err := query.SelectIntegers(tx, "SELECT id FROM nodes")
+	if err != nil {
+		return errors.Wrap(err, "failed to get IDs of current nodes")
+	}
+
+	// Fetch the IDs of all existing lvm pools.
+	poolIDs, err := query.SelectIntegers(tx, "SELECT id FROM storage_pools WHERE driver='lvm'")
+	if err != nil {
+		return errors.Wrap(err, "failed to get IDs of current lvm pools")
+	}
+
+	for _, poolID := range poolIDs {
+		// Fetch the config for this lvm pool and check if it has the
+		// lvm.thinpool_name or lvm.vg_name keys.
+		config, err := query.SelectConfig(
+			tx, "storage_pools_config", "storage_pool_id=? AND node_id IS NULL", poolID)
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch lvm pool config")
+		}
+
+		for _, key := range []string{"lvm.thinpool_name", "lvm.vg_name"} {
+			value, ok := config[key]
+			if !ok {
+				continue
+			}
+
+			// Delete the current key
+			_, err = tx.Exec(`
+DELETE FROM storage_pools_config WHERE key=? AND storage_pool_id=? AND node_id IS NULL
+`, key, poolID)
+			if err != nil {
+				return errors.Wrapf(err, "failed to delete %s config", key)
+			}
+
+			// Add the config entry for each node
+			for _, nodeID := range nodeIDs {
+				_, err := tx.Exec(`
+INSERT INTO storage_pools_config(storage_pool_id, node_id, key, value)
+  VALUES(?, ?, ?, ?)
+`, poolID, nodeID, key, value)
+				if err != nil {
+					return errors.Wrapf(err, "failed to create %s node config", key)
+				}
+			}
+		}
+	}
+
+	err = tx.Commit()
+	if err != nil {
+		return errors.Wrap(err, "failed to commit transaction")
+	}
+
+	return err
+}
+
 func patchStorageApiDirCleanup(name string, d *Daemon) error {
 	fingerprints, err := d.cluster.ImagesGet(false)
 	if err != nil {


More information about the lxc-devel mailing list