[lxc-devel] [lxd/master] Rename db function names part 5
freeekanayaka on Github
lxc-bot at linuxcontainers.org
Wed May 6 14:58:42 UTC 2020
A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 301 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20200506/f077eb99/attachment-0001.bin>
-------------- next part --------------
From 518681d1fa9d06b72d227709458c0d7e17ed8e8a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 14:50:20 +0100
Subject: [PATCH 01/43] lxd/db: Rename ProjectNames to GetProjectNames
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/projects.go | 4 ++--
lxd/instance/instance_utils.go | 2 +-
lxd/storage_pools.go | 2 +-
lxd/storage_volumes_snapshot.go | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/lxd/db/projects.go b/lxd/db/projects.go
index 6b1a370160..1db14db58a 100644
--- a/lxd/db/projects.go
+++ b/lxd/db/projects.go
@@ -54,8 +54,8 @@ func (c *ClusterTx) ProjectHasProfiles(name string) (bool, error) {
return projectHasProfiles(c.tx, name)
}
-// ProjectNames returns the names of all available projects.
-func (c *ClusterTx) ProjectNames() ([]string, error) {
+// GetProjectNames returns the names of all available projects.
+func (c *ClusterTx) GetProjectNames() ([]string, error) {
stmt := "SELECT name FROM projects"
names, err := query.SelectStrings(c.tx, stmt)
diff --git a/lxd/instance/instance_utils.go b/lxd/instance/instance_utils.go
index fcb76deed1..7420261218 100644
--- a/lxd/instance/instance_utils.go
+++ b/lxd/instance/instance_utils.go
@@ -545,7 +545,7 @@ func LoadFromAllProjects(s *state.State) ([]Instance, error) {
err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
var err error
- projects, err = tx.ProjectNames()
+ projects, err = tx.GetProjectNames()
return err
})
if err != nil {
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 2f7faf9ba0..a7bc12a7dd 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -635,7 +635,7 @@ func storagePoolDeleteCheckPreconditions(cluster *db.Cluster, poolName string, p
var projects []string
err = cluster.Transaction(func(tx *db.ClusterTx) error {
- projects, err = tx.ProjectNames()
+ projects, err = tx.GetProjectNames()
return err
})
if err != nil {
diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index 3078dbe1bf..6fec3c29fb 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -776,7 +776,7 @@ func volumeDetermineNextSnapshotName(d *Daemon, volume db.StorageVolumeArgs, def
var projects []string
err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
- projects, err = tx.ProjectNames()
+ projects, err = tx.GetProjectNames()
return err
})
if err != nil {
From 6a0bd5efacc7a632e1fc8c59b4a05c6d21d9347f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 14:51:45 +0100
Subject: [PATCH 02/43] lxd/db: Rename ProjectMap to GetProjectIDsToNames
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/daemon.go | 2 +-
lxd/db/projects.go | 5 +++--
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 977179e5a6..08d2996ac4 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -1308,7 +1308,7 @@ func (d *Daemon) setupRBACServer(rbacURL string, rbacKey string, rbacExpiry int6
var result map[int64]string
err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
var err error
- result, err = tx.ProjectMap()
+ result, err = tx.GetProjectIDsToNames()
return err
})
diff --git a/lxd/db/projects.go b/lxd/db/projects.go
index 1db14db58a..86197f3975 100644
--- a/lxd/db/projects.go
+++ b/lxd/db/projects.go
@@ -66,8 +66,9 @@ func (c *ClusterTx) GetProjectNames() ([]string, error) {
return names, nil
}
-// ProjectMap returns the names and ids of all available projects.
-func (c *ClusterTx) ProjectMap() (map[int64]string, error) {
+// GetProjectIDsToNames returns a map associating each project ID to its
+// project name.
+func (c *ClusterTx) GetProjectIDsToNames() (map[int64]string, error) {
stmt := "SELECT id, name FROM projects"
rows, err := c.tx.Query(stmt)
From 1b131338fe4025687c9c1888d7f8eaf93cd2e40b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 14:52:41 +0100
Subject: [PATCH 03/43] lxd/db: Rename ProjectUpdate to UpdateProject
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_project.go | 2 +-
lxd/db/projects.go | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/lxd/api_project.go b/lxd/api_project.go
index 4630daf26c..6b665be2bd 100644
--- a/lxd/api_project.go
+++ b/lxd/api_project.go
@@ -369,7 +369,7 @@ func projectChange(d *Daemon, project *api.Project, req api.ProjectPut) response
return err
}
- err = tx.ProjectUpdate(project.Name, req)
+ err = tx.UpdateProject(project.Name, req)
if err != nil {
return errors.Wrap(err, "Persist profile changes")
}
diff --git a/lxd/db/projects.go b/lxd/db/projects.go
index 86197f3975..b62395ddfa 100644
--- a/lxd/db/projects.go
+++ b/lxd/db/projects.go
@@ -130,8 +130,8 @@ func (c *ClusterTx) ProjectHasImages(name string) (bool, error) {
return enabled, nil
}
-// ProjectUpdate updates the project matching the given key parameters.
-func (c *ClusterTx) ProjectUpdate(name string, object api.ProjectPut) error {
+// UpdateProject updates the project matching the given key parameters.
+func (c *ClusterTx) UpdateProject(name string, object api.ProjectPut) error {
stmt := c.stmt(projectUpdate)
result, err := stmt.Exec(object.Description, name)
if err != nil {
From 4ce12678b413ccef4b700b587240947f85290b2a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 14:55:38 +0100
Subject: [PATCH 04/43] lxd/db: Rename ProjectLaunchWithoutImages to
InitProjectWithoutImages
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_project.go | 2 +-
lxd/db/projects.go | 6 ++++--
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/lxd/api_project.go b/lxd/api_project.go
index 6b665be2bd..fab15f7656 100644
--- a/lxd/api_project.go
+++ b/lxd/api_project.go
@@ -148,7 +148,7 @@ func projectsPost(d *Daemon, r *http.Request) response.Response {
}
if project.Config["features.images"] == "false" {
- err = tx.ProjectLaunchWithoutImages(project.Name)
+ err = tx.InitProjectWithoutImages(project.Name)
if err != nil {
return err
}
diff --git a/lxd/db/projects.go b/lxd/db/projects.go
index b62395ddfa..d52b8d9e87 100644
--- a/lxd/db/projects.go
+++ b/lxd/db/projects.go
@@ -171,8 +171,10 @@ DELETE FROM projects_config WHERE projects_config.project_id = ?
return nil
}
-// ProjectLaunchWithoutImages updates the images_profiles table when a Project is created with features.images=false.
-func (c *ClusterTx) ProjectLaunchWithoutImages(project string) error {
+// InitProjectWithoutImages populates the images_profiles table with
+// all images from the default project when a project is created with
+// features.images=false.
+func (c *ClusterTx) InitProjectWithoutImages(project string) error {
defaultProfileID, err := c.ProfileID(project, "default")
if err != nil {
return errors.Wrap(err, "Fetch project ID")
From 31c151e51c3f5edb49c8405190581f9c7b1506b3 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 14:57:51 +0100
Subject: [PATCH 05/43] lxd/db: Rename RaftNodes to GetRaftNodes
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_cluster_test.go | 4 ++--
lxd/cluster/gateway.go | 2 +-
lxd/cluster/membership.go | 2 +-
lxd/cluster/membership_test.go | 4 ++--
lxd/cluster/recover.go | 2 +-
lxd/db/raft.go | 4 ++--
lxd/db/raft_test.go | 4 ++--
lxd/node/raft.go | 2 +-
lxd/patches.go | 2 +-
9 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index b0a3da46a9..5eb9b12b99 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -143,7 +143,7 @@ func TestCluster_Join(t *testing.T) {
// entry for the joining node itself.
state := daemons[1].State()
err = state.Node.Transaction(func(tx *db.NodeTx) error {
- nodes, err := tx.RaftNodes()
+ nodes, err := tx.GetRaftNodes()
require.NoError(t, err)
require.True(t, len(nodes) >= 1, "no rows in raft_nodes table")
assert.Equal(t, int64(1), nodes[0].ID)
@@ -240,7 +240,7 @@ func TestCluster_JoinServerAddress(t *testing.T) {
// entry for the joining node itself.
state := daemons[1].State()
err = state.Node.Transaction(func(tx *db.NodeTx) error {
- nodes, err := tx.RaftNodes()
+ nodes, err := tx.GetRaftNodes()
require.NoError(t, err)
require.True(t, len(nodes) >= 1, "no rows in raft_nodes table")
assert.Equal(t, int64(1), nodes[0].ID)
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index abd2ec53b1..7b50b37413 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -548,7 +548,7 @@ func (g *Gateway) LeaderAddress() (string, error) {
}
addresses := []string{}
err = g.db.Transaction(func(tx *db.NodeTx) error {
- nodes, err := tx.RaftNodes()
+ nodes, err := tx.GetRaftNodes()
if err != nil {
return err
}
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 788d7ac3f6..c35a6bddd0 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -1034,7 +1034,7 @@ func Enabled(node *db.Node) (bool, error) {
// Check that node-related preconditions are met for bootstrapping or joining a
// cluster.
func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
- nodes, err := tx.RaftNodes()
+ nodes, err := tx.GetRaftNodes()
if err != nil {
return errors.Wrap(err, "failed to fetch current raft nodes")
}
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 5f5875a51c..72c4137b59 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -98,7 +98,7 @@ func TestBootstrap(t *testing.T) {
// The node-local database has now an entry in the raft_nodes table
err = state.Node.Transaction(func(tx *db.NodeTx) error {
- nodes, err := tx.RaftNodes()
+ nodes, err := tx.GetRaftNodes()
require.NoError(t, err)
require.Len(t, nodes, 1)
assert.Equal(t, uint64(1), nodes[0].ID)
@@ -404,7 +404,7 @@ func (h *membershipFixtures) RaftNodes() []db.RaftNode {
var nodes []db.RaftNode
err := h.state.Node.Transaction(func(tx *db.NodeTx) error {
var err error
- nodes, err = tx.RaftNodes()
+ nodes, err = tx.GetRaftNodes()
return err
})
require.NoError(h.t, err)
diff --git a/lxd/cluster/recover.go b/lxd/cluster/recover.go
index b41eb3cf62..89dcfaa3e6 100644
--- a/lxd/cluster/recover.go
+++ b/lxd/cluster/recover.go
@@ -18,7 +18,7 @@ func ListDatabaseNodes(database *db.Node) ([]string, error) {
nodes := []db.RaftNode{}
err := database.Transaction(func(tx *db.NodeTx) error {
var err error
- nodes, err = tx.RaftNodes()
+ nodes, err = tx.GetRaftNodes()
return err
})
if err != nil {
diff --git a/lxd/db/raft.go b/lxd/db/raft.go
index 8015f42617..d96b81522b 100644
--- a/lxd/db/raft.go
+++ b/lxd/db/raft.go
@@ -26,10 +26,10 @@ const (
RaftSpare = client.Spare
)
-// RaftNodes returns information about all LXD nodes that are members of the
+// GetRaftNodes returns information about all LXD nodes that are members of the
// dqlite Raft cluster (possibly including the local node). If this LXD
// instance is not running in clustered mode, an empty list is returned.
-func (n *NodeTx) RaftNodes() ([]RaftNode, error) {
+func (n *NodeTx) GetRaftNodes() ([]RaftNode, error) {
nodes := []RaftNode{}
dest := func(i int) []interface{} {
nodes = append(nodes, RaftNode{})
diff --git a/lxd/db/raft_test.go b/lxd/db/raft_test.go
index 4c4a2ca0d9..a0b5d13a9e 100644
--- a/lxd/db/raft_test.go
+++ b/lxd/db/raft_test.go
@@ -21,7 +21,7 @@ func TestRaftNodes(t *testing.T) {
id2, err := tx.RaftNodeAdd("5.6.7.8:666")
require.NoError(t, err)
- nodes, err := tx.RaftNodes()
+ nodes, err := tx.GetRaftNodes()
require.NoError(t, err)
assert.Equal(t, uint64(id1), nodes[0].ID)
@@ -128,7 +128,7 @@ func TestRaftNodesReplace(t *testing.T) {
err = tx.RaftNodesReplace(nodes)
assert.NoError(t, err)
- newNodes, err := tx.RaftNodes()
+ newNodes, err := tx.GetRaftNodes()
require.NoError(t, err)
assert.Equal(t, nodes, newNodes)
diff --git a/lxd/node/raft.go b/lxd/node/raft.go
index 8260b6fca7..a6c34b8656 100644
--- a/lxd/node/raft.go
+++ b/lxd/node/raft.go
@@ -39,7 +39,7 @@ func DetermineRaftNode(tx *db.NodeTx) (*db.RaftNode, error) {
return &db.RaftNode{ID: 1}, nil
}
- nodes, err := tx.RaftNodes()
+ nodes, err := tx.GetRaftNodes()
if err != nil {
return nil, err
}
diff --git a/lxd/patches.go b/lxd/patches.go
index 9c565b9f5a..4fa543c840 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -3446,7 +3446,7 @@ func patchStorageApiUpdateContainerSnapshots(name string, d *Daemon) error {
func patchClusteringAddRoles(name string, d *Daemon) error {
addresses := []string{}
err := d.State().Node.Transaction(func(tx *db.NodeTx) error {
- nodes, err := tx.RaftNodes()
+ nodes, err := tx.GetRaftNodes()
if err != nil {
return errors.Wrap(err, "Failed to fetch current raft nodes")
}
From aa1f591159c61a5589e1c318fe58880bd493187b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 14:59:27 +0100
Subject: [PATCH 06/43] lxd/db: Rename RaftNodeAddresses to
GetRaftNodeAddresses
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/cluster/membership.go | 2 +-
lxd/db/raft.go | 4 ++--
lxd/db/raft_test.go | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index c35a6bddd0..da067d7ec2 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -1021,7 +1021,7 @@ func Count(state *state.State) (int, error) {
func Enabled(node *db.Node) (bool, error) {
enabled := false
err := node.Transaction(func(tx *db.NodeTx) error {
- addresses, err := tx.RaftNodeAddresses()
+ addresses, err := tx.GetRaftNodeAddresses()
if err != nil {
return err
}
diff --git a/lxd/db/raft.go b/lxd/db/raft.go
index d96b81522b..fe67a3d1e9 100644
--- a/lxd/db/raft.go
+++ b/lxd/db/raft.go
@@ -47,10 +47,10 @@ func (n *NodeTx) GetRaftNodes() ([]RaftNode, error) {
return nodes, nil
}
-// RaftNodeAddresses returns the addresses of all LXD nodes that are members of
+// GetRaftNodeAddresses returns the addresses of all LXD nodes that are members of
// the dqlite Raft cluster (possibly including the local node). If this LXD
// instance is not running in clustered mode, an empty list is returned.
-func (n *NodeTx) RaftNodeAddresses() ([]string, error) {
+func (n *NodeTx) GetRaftNodeAddresses() ([]string, error) {
return query.SelectStrings(n.tx, "SELECT address FROM raft_nodes")
}
diff --git a/lxd/db/raft_test.go b/lxd/db/raft_test.go
index a0b5d13a9e..f631e77a9b 100644
--- a/lxd/db/raft_test.go
+++ b/lxd/db/raft_test.go
@@ -31,7 +31,7 @@ func TestRaftNodes(t *testing.T) {
}
// Fetch the addresses of all raft nodes.
-func TestRaftNodeAddresses(t *testing.T) {
+func TestGetRaftNodeAddresses(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
@@ -41,7 +41,7 @@ func TestRaftNodeAddresses(t *testing.T) {
_, err = tx.RaftNodeAdd("5.6.7.8:666")
require.NoError(t, err)
- addresses, err := tx.RaftNodeAddresses()
+ addresses, err := tx.GetRaftNodeAddresses()
require.NoError(t, err)
assert.Equal(t, []string{"1.2.3.4:666", "5.6.7.8:666"}, addresses)
From 5ed5f322b347ec2310acfad89a4585395352455b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:00:29 +0100
Subject: [PATCH 07/43] lxd/db: Rename RaftNodeAddress to GetRaftNodeAddress
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/cluster/gateway.go | 2 +-
lxd/db/raft.go | 4 ++--
lxd/db/raft_test.go | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 7b50b37413..cfab2852fc 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -788,7 +788,7 @@ func (g *Gateway) raftAddress(databaseID uint64) (string, error) {
var address string
err := g.db.Transaction(func(tx *db.NodeTx) error {
var err error
- address, err = tx.RaftNodeAddress(int64(databaseID))
+ address, err = tx.GetRaftNodeAddress(int64(databaseID))
return err
})
if err != nil {
diff --git a/lxd/db/raft.go b/lxd/db/raft.go
index fe67a3d1e9..f4bf1826c7 100644
--- a/lxd/db/raft.go
+++ b/lxd/db/raft.go
@@ -54,9 +54,9 @@ func (n *NodeTx) GetRaftNodeAddresses() ([]string, error) {
return query.SelectStrings(n.tx, "SELECT address FROM raft_nodes")
}
-// RaftNodeAddress returns the address of the LXD raft node with the given ID,
+// GetRaftNodeAddress returns the address of the LXD raft node with the given ID,
// if any matching row exists.
-func (n *NodeTx) RaftNodeAddress(id int64) (string, error) {
+func (n *NodeTx) GetRaftNodeAddress(id int64) (string, error) {
stmt := "SELECT address FROM raft_nodes WHERE id=?"
addresses, err := query.SelectStrings(n.tx, stmt, id)
if err != nil {
diff --git a/lxd/db/raft_test.go b/lxd/db/raft_test.go
index f631e77a9b..d6d3a0ce95 100644
--- a/lxd/db/raft_test.go
+++ b/lxd/db/raft_test.go
@@ -48,7 +48,7 @@ func TestGetRaftNodeAddresses(t *testing.T) {
}
// Fetch the address of the raft node with the given ID.
-func TestRaftNodeAddress(t *testing.T) {
+func TestGetRaftNodeAddress(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
@@ -58,7 +58,7 @@ func TestRaftNodeAddress(t *testing.T) {
id, err := tx.RaftNodeAdd("5.6.7.8:666")
require.NoError(t, err)
- address, err := tx.RaftNodeAddress(id)
+ address, err := tx.GetRaftNodeAddress(id)
require.NoError(t, err)
assert.Equal(t, "5.6.7.8:666", address)
}
@@ -77,7 +77,7 @@ func TestRaftNodeFirst(t *testing.T) {
err = tx.RaftNodeFirst("5.6.7.8:666")
assert.NoError(t, err)
- address, err := tx.RaftNodeAddress(1)
+ address, err := tx.GetRaftNodeAddress(1)
require.NoError(t, err)
assert.Equal(t, "5.6.7.8:666", address)
}
From 5349ac37e7d4fd8fd94f26c09dba185ba0dc590c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:01:56 +0100
Subject: [PATCH 08/43] lxd/db: Rename RaftNodeFirst to CreateFirstRaftNode
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/cluster/membership.go | 2 +-
lxd/db/raft.go | 6 +++---
lxd/db/raft_test.go | 6 +++---
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index da067d7ec2..52e552ab55 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -56,7 +56,7 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
}
// Add ourselves as first raft node
- err = tx.RaftNodeFirst(address)
+ err = tx.CreateFirstRaftNode(address)
if err != nil {
return errors.Wrap(err, "failed to insert first raft node")
}
diff --git a/lxd/db/raft.go b/lxd/db/raft.go
index f4bf1826c7..c0f3a7b020 100644
--- a/lxd/db/raft.go
+++ b/lxd/db/raft.go
@@ -74,12 +74,12 @@ func (n *NodeTx) GetRaftNodeAddress(id int64) (string, error) {
}
}
-// RaftNodeFirst adds a the first node of the cluster. It ensures that the
-// database ID is 1, to match the server ID of first raft log entry.
+// CreateFirstRaftNode adds the first node of the cluster. It ensures that the
+// database ID is 1, to match the server ID of the first raft log entry.
//
// This method is supposed to be called when there are no rows in raft_nodes,
// and it will replace whatever existing row has ID 1.
-func (n *NodeTx) RaftNodeFirst(address string) error {
+func (n *NodeTx) CreateFirstRaftNode(address string) error {
columns := []string{"id", "address"}
values := []interface{}{int64(1), address}
id, err := query.UpsertObject(n.tx, "raft_nodes", columns, values)
diff --git a/lxd/db/raft_test.go b/lxd/db/raft_test.go
index d6d3a0ce95..95d0004a52 100644
--- a/lxd/db/raft_test.go
+++ b/lxd/db/raft_test.go
@@ -64,17 +64,17 @@ func TestGetRaftNodeAddress(t *testing.T) {
}
// Add the first raft node.
-func TestRaftNodeFirst(t *testing.T) {
+func TestCreateFirstRaftNode(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
- err := tx.RaftNodeFirst("1.2.3.4:666")
+ err := tx.CreateFirstRaftNode("1.2.3.4:666")
assert.NoError(t, err)
err = tx.RaftNodeDelete(1)
assert.NoError(t, err)
- err = tx.RaftNodeFirst("5.6.7.8:666")
+ err = tx.CreateFirstRaftNode("5.6.7.8:666")
assert.NoError(t, err)
address, err := tx.GetRaftNodeAddress(1)
From 559547262f078a943ec14b150874030527f3b029 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:03:12 +0100
Subject: [PATCH 09/43] lxd/db: Rename RaftNodeAdd to CreateRaftNode
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/cluster/membership_test.go | 2 +-
lxd/cluster/raft_test.go | 2 +-
lxd/db/raft.go | 4 ++--
lxd/db/raft_test.go | 20 ++++++++++----------
lxd/node/raft_test.go | 2 +-
5 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 72c4137b59..63826b5f14 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -393,7 +393,7 @@ func (h *membershipFixtures) ClusterAddress(address string) {
// Add the given address to the raft_nodes table.
func (h *membershipFixtures) RaftNode(address string) {
err := h.state.Node.Transaction(func(tx *db.NodeTx) error {
- _, err := tx.RaftNodeAdd(address)
+ _, err := tx.CreateRaftNode(address)
return err
})
require.NoError(h.t, err)
diff --git a/lxd/cluster/raft_test.go b/lxd/cluster/raft_test.go
index 7e85cbf6a3..8bc9ee9fcf 100644
--- a/lxd/cluster/raft_test.go
+++ b/lxd/cluster/raft_test.go
@@ -22,7 +22,7 @@ func setRaftRole(t *testing.T, database *db.Node, address string) client.NodeSto
if err != nil {
return err
}
- _, err = tx.RaftNodeAdd(address)
+ _, err = tx.CreateRaftNode(address)
return err
}))
diff --git a/lxd/db/raft.go b/lxd/db/raft.go
index c0f3a7b020..3215bf16ab 100644
--- a/lxd/db/raft.go
+++ b/lxd/db/raft.go
@@ -92,9 +92,9 @@ func (n *NodeTx) CreateFirstRaftNode(address string) error {
return nil
}
-// RaftNodeAdd adds a node to the current list of LXD nodes that are part of the
+// CreateRaftNode adds a node to the current list of LXD nodes that are part of the
// dqlite Raft cluster. It returns the ID of the newly inserted row.
-func (n *NodeTx) RaftNodeAdd(address string) (int64, error) {
+func (n *NodeTx) CreateRaftNode(address string) (int64, error) {
columns := []string{"address"}
values := []interface{}{address}
return query.UpsertObject(n.tx, "raft_nodes", columns, values)
diff --git a/lxd/db/raft_test.go b/lxd/db/raft_test.go
index 95d0004a52..2a5fb4c475 100644
--- a/lxd/db/raft_test.go
+++ b/lxd/db/raft_test.go
@@ -15,10 +15,10 @@ func TestRaftNodes(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
- id1, err := tx.RaftNodeAdd("1.2.3.4:666")
+ id1, err := tx.CreateRaftNode("1.2.3.4:666")
require.NoError(t, err)
- id2, err := tx.RaftNodeAdd("5.6.7.8:666")
+ id2, err := tx.CreateRaftNode("5.6.7.8:666")
require.NoError(t, err)
nodes, err := tx.GetRaftNodes()
@@ -35,10 +35,10 @@ func TestGetRaftNodeAddresses(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
- _, err := tx.RaftNodeAdd("1.2.3.4:666")
+ _, err := tx.CreateRaftNode("1.2.3.4:666")
require.NoError(t, err)
- _, err = tx.RaftNodeAdd("5.6.7.8:666")
+ _, err = tx.CreateRaftNode("5.6.7.8:666")
require.NoError(t, err)
addresses, err := tx.GetRaftNodeAddresses()
@@ -52,10 +52,10 @@ func TestGetRaftNodeAddress(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
- _, err := tx.RaftNodeAdd("1.2.3.4:666")
+ _, err := tx.CreateRaftNode("1.2.3.4:666")
require.NoError(t, err)
- id, err := tx.RaftNodeAdd("5.6.7.8:666")
+ id, err := tx.CreateRaftNode("5.6.7.8:666")
require.NoError(t, err)
address, err := tx.GetRaftNodeAddress(id)
@@ -83,11 +83,11 @@ func TestCreateFirstRaftNode(t *testing.T) {
}
// Add a new raft node.
-func TestRaftNodeAdd(t *testing.T) {
+func TestCreateRaftNode(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
- id, err := tx.RaftNodeAdd("1.2.3.4:666")
+ id, err := tx.CreateRaftNode("1.2.3.4:666")
assert.Equal(t, int64(1), id)
assert.NoError(t, err)
}
@@ -97,7 +97,7 @@ func TestRaftNodeDelete(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
- id, err := tx.RaftNodeAdd("1.2.3.4:666")
+ id, err := tx.CreateRaftNode("1.2.3.4:666")
require.NoError(t, err)
err = tx.RaftNodeDelete(id)
@@ -118,7 +118,7 @@ func TestRaftNodesReplace(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
- _, err := tx.RaftNodeAdd("1.2.3.4:666")
+ _, err := tx.CreateRaftNode("1.2.3.4:666")
require.NoError(t, err)
nodes := []db.RaftNode{
diff --git a/lxd/node/raft_test.go b/lxd/node/raft_test.go
index c3bcbd7c98..88bfacccd1 100644
--- a/lxd/node/raft_test.go
+++ b/lxd/node/raft_test.go
@@ -60,7 +60,7 @@ func TestDetermineRaftNode(t *testing.T) {
require.NoError(t, err)
for _, address := range c.addresses {
- _, err := tx.RaftNodeAdd(address)
+ _, err := tx.CreateRaftNode(address)
require.NoError(t, err)
}
From f14ebd72bb50f1efa5b5be723db431783b3d757b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:04:12 +0100
Subject: [PATCH 10/43] lxd/db: Rename RaftNodeDelete to RemoveRaftNode
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/raft.go | 4 ++--
lxd/db/raft_test.go | 10 +++++-----
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/lxd/db/raft.go b/lxd/db/raft.go
index 3215bf16ab..b83ff1586b 100644
--- a/lxd/db/raft.go
+++ b/lxd/db/raft.go
@@ -100,9 +100,9 @@ func (n *NodeTx) CreateRaftNode(address string) (int64, error) {
return query.UpsertObject(n.tx, "raft_nodes", columns, values)
}
-// RaftNodeDelete removes a node from the current list of LXD nodes that are
+// RemoveRaftNode removes a node from the current list of LXD nodes that are
// part of the dqlite Raft cluster.
-func (n *NodeTx) RaftNodeDelete(id int64) error {
+func (n *NodeTx) RemoveRaftNode(id int64) error {
deleted, err := query.DeleteObject(n.tx, "raft_nodes", id)
if err != nil {
return err
diff --git a/lxd/db/raft_test.go b/lxd/db/raft_test.go
index 2a5fb4c475..9a73c6cbc0 100644
--- a/lxd/db/raft_test.go
+++ b/lxd/db/raft_test.go
@@ -71,7 +71,7 @@ func TestCreateFirstRaftNode(t *testing.T) {
err := tx.CreateFirstRaftNode("1.2.3.4:666")
assert.NoError(t, err)
- err = tx.RaftNodeDelete(1)
+ err = tx.RemoveRaftNode(1)
assert.NoError(t, err)
err = tx.CreateFirstRaftNode("5.6.7.8:666")
@@ -93,23 +93,23 @@ func TestCreateRaftNode(t *testing.T) {
}
// Delete an existing raft node.
-func TestRaftNodeDelete(t *testing.T) {
+func TestRemoveRaftNode(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
id, err := tx.CreateRaftNode("1.2.3.4:666")
require.NoError(t, err)
- err = tx.RaftNodeDelete(id)
+ err = tx.RemoveRaftNode(id)
assert.NoError(t, err)
}
// Delete a non-existing raft node returns an error.
-func TestRaftNodeDelete_NonExisting(t *testing.T) {
+func TestRemoveRaftNode_NonExisting(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
- err := tx.RaftNodeDelete(1)
+ err := tx.RemoveRaftNode(1)
assert.Equal(t, db.ErrNoSuchObject, err)
}
From 6d2867dac58460a8b47139f49a4dfdae2eb8ecaf Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:05:19 +0100
Subject: [PATCH 11/43] lxd/db: Rename RaftNodesReplace to ReplaceRaftNodes
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/cluster/gateway.go | 4 ++--
lxd/cluster/heartbeat.go | 2 +-
lxd/cluster/membership.go | 4 ++--
lxd/cluster/recover.go | 2 +-
lxd/cluster/upgrade_test.go | 2 +-
lxd/db/raft.go | 4 ++--
lxd/db/raft_test.go | 4 ++--
7 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index cfab2852fc..e46908a6fe 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -197,7 +197,7 @@ func (g *Gateway) HandlerFuncs(nodeRefreshTask func(*APIHeartbeat)) map[string]h
// Accept Raft node updates from any node (joining nodes just send raft nodes heartbeat data).
logger.Debugf("Replace current raft nodes with %+v", raftNodes)
err = g.db.Transaction(func(tx *db.NodeTx) error {
- return tx.RaftNodesReplace(raftNodes)
+ return tx.ReplaceRaftNodes(raftNodes)
})
if err != nil {
logger.Errorf("Error updating raft nodes: %v", err)
@@ -497,7 +497,7 @@ func (g *Gateway) Reset(cert *shared.CertInfo) error {
return err
}
err = g.db.Transaction(func(tx *db.NodeTx) error {
- return tx.RaftNodesReplace(nil)
+ return tx.ReplaceRaftNodes(nil)
})
if err != nil {
return err
diff --git a/lxd/cluster/heartbeat.go b/lxd/cluster/heartbeat.go
index cfdb611bb7..6db52b4ed9 100644
--- a/lxd/cluster/heartbeat.go
+++ b/lxd/cluster/heartbeat.go
@@ -210,7 +210,7 @@ func (g *Gateway) heartbeat(ctx context.Context, initialHeartbeat bool) {
// send us a fresh update through the heartbeat pool.
logger.Debugf("Heartbeat updating local raft nodes to %+v", raftNodes)
err = g.db.Transaction(func(tx *db.NodeTx) error {
- return tx.RaftNodesReplace(raftNodes)
+ return tx.ReplaceRaftNodes(raftNodes)
})
if err != nil {
logger.Warnf("Failed to replace local raft nodes: %v", err)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 52e552ab55..868b92baa8 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -260,7 +260,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
}
// Set the raft nodes list to the one that was returned by Accept().
- err = tx.RaftNodesReplace(raftNodes)
+ err = tx.ReplaceRaftNodes(raftNodes)
if err != nil {
return errors.Wrap(err, "failed to set raft nodes")
}
@@ -653,7 +653,7 @@ func Assign(state *state.State, gateway *Gateway, nodes []db.RaftNode) error {
// Replace our local list of raft nodes with the given one (which
// includes ourselves).
err = state.Node.Transaction(func(tx *db.NodeTx) error {
- err = tx.RaftNodesReplace(nodes)
+ err = tx.ReplaceRaftNodes(nodes)
if err != nil {
return errors.Wrap(err, "Failed to set raft nodes")
}
diff --git a/lxd/cluster/recover.go b/lxd/cluster/recover.go
index 89dcfaa3e6..d14f390622 100644
--- a/lxd/cluster/recover.go
+++ b/lxd/cluster/recover.go
@@ -82,7 +82,7 @@ func Recover(database *db.Node) error {
nodes := []db.RaftNode{
{ID: info.ID, Address: info.Address},
}
- return tx.RaftNodesReplace(nodes)
+ return tx.ReplaceRaftNodes(nodes)
})
if err != nil {
return errors.Wrap(err, "Failed to update database nodes")
diff --git a/lxd/cluster/upgrade_test.go b/lxd/cluster/upgrade_test.go
index 9c749bb3bd..b5500f7bbf 100644
--- a/lxd/cluster/upgrade_test.go
+++ b/lxd/cluster/upgrade_test.go
@@ -59,7 +59,7 @@ func TestMaybeUpdate_Upgrade(t *testing.T) {
{ID: 1, Address: "0.0.0.0:666"},
{ID: 2, Address: "1.2.3.4:666"},
}
- err := tx.RaftNodesReplace(nodes)
+ err := tx.ReplaceRaftNodes(nodes)
require.NoError(t, err)
return nil
})
diff --git a/lxd/db/raft.go b/lxd/db/raft.go
index b83ff1586b..4444471299 100644
--- a/lxd/db/raft.go
+++ b/lxd/db/raft.go
@@ -113,8 +113,8 @@ func (n *NodeTx) RemoveRaftNode(id int64) error {
return nil
}
-// RaftNodesReplace replaces the current list of raft nodes.
-func (n *NodeTx) RaftNodesReplace(nodes []RaftNode) error {
+// ReplaceRaftNodes replaces the current list of raft nodes.
+func (n *NodeTx) ReplaceRaftNodes(nodes []RaftNode) error {
_, err := n.tx.Exec("DELETE FROM raft_nodes")
if err != nil {
return err
diff --git a/lxd/db/raft_test.go b/lxd/db/raft_test.go
index 9a73c6cbc0..be881f167a 100644
--- a/lxd/db/raft_test.go
+++ b/lxd/db/raft_test.go
@@ -114,7 +114,7 @@ func TestRemoveRaftNode_NonExisting(t *testing.T) {
}
// Replace all existing raft nodes.
-func TestRaftNodesReplace(t *testing.T) {
+func TestReplaceRaftNodes(t *testing.T) {
tx, cleanup := db.NewTestNodeTx(t)
defer cleanup()
@@ -125,7 +125,7 @@ func TestRaftNodesReplace(t *testing.T) {
{ID: 2, Address: "2.2.2.2:666"},
{ID: 3, Address: "3.3.3.3:666"},
}
- err = tx.RaftNodesReplace(nodes)
+ err = tx.ReplaceRaftNodes(nodes)
assert.NoError(t, err)
newNodes, err := tx.GetRaftNodes()
From 52d6c357187c1a1e027644731afe3cb131169edd Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:06:44 +0100
Subject: [PATCH 12/43] lxd/db: Rename InstanceSnapshotConfigUpdate to
UpdateInstanceSnapshotConfig
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/snapshots.go | 4 ++--
lxd/instance/drivers/driver_lxc.go | 2 +-
lxd/instance/drivers/driver_qemu.go | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/lxd/db/snapshots.go b/lxd/db/snapshots.go
index 157d7ffc50..3b7b412e3a 100644
--- a/lxd/db/snapshots.go
+++ b/lxd/db/snapshots.go
@@ -86,8 +86,8 @@ func InstanceSnapshotToInstance(instance *Instance, snapshot *InstanceSnapshot)
}
}
-// InstanceSnapshotConfigUpdate inserts/updates/deletes the provided config keys.
-func (c *ClusterTx) InstanceSnapshotConfigUpdate(id int, values map[string]string) error {
+// UpdateInstanceSnapshotConfig inserts/updates/deletes the provided config keys.
+func (c *ClusterTx) UpdateInstanceSnapshotConfig(id int, values map[string]string) error {
insertSQL := "INSERT OR REPLACE INTO instances_snapshots_config (instance_snapshot_id, key, value) VALUES"
deleteSQL := "DELETE FROM instances_snapshots_config WHERE key IN %s AND instance_snapshot_id=?"
return c.configUpdate(id, values, insertSQL, deleteSQL)
diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index 47828c0c06..688b4caea8 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -3729,7 +3729,7 @@ func (c *lxc) VolatileSet(changes map[string]string) error {
var err error
if c.IsSnapshot() {
err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
- return tx.InstanceSnapshotConfigUpdate(c.id, changes)
+ return tx.UpdateInstanceSnapshotConfig(c.id, changes)
})
} else {
err = c.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 2c768041ad..53a57fd090 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -3280,7 +3280,7 @@ func (vm *qemu) VolatileSet(changes map[string]string) error {
var err error
if vm.IsSnapshot() {
err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
- return tx.InstanceSnapshotConfigUpdate(vm.id, changes)
+ return tx.UpdateInstanceSnapshotConfig(vm.id, changes)
})
} else {
err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
From 7c71264e500849b020175ed87d1617c4066ac36c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:08:34 +0100
Subject: [PATCH 13/43] lxd/db: Rename InstanceSnapshotID to
GetInstanceSnapshotID
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_internal.go | 2 +-
lxd/db/snapshots.go | 8 ++++----
lxd/instance/drivers/driver_lxc.go | 2 +-
lxd/instance/drivers/driver_qemu.go | 2 +-
lxd/instance_snapshot.go | 2 +-
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 4f9811f40e..1d1261dc7b 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -664,7 +664,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
parts := strings.SplitN(snap.Name, shared.SnapshotDelimiter, 2)
// Check if an entry for the snapshot already exists in the db.
- _, snapErr := d.cluster.InstanceSnapshotID(projectName, parts[0], parts[1])
+ _, snapErr := d.cluster.GetInstanceSnapshotID(projectName, parts[0], parts[1])
if snapErr != nil {
if snapErr != db.ErrNoSuchObject {
return response.SmartError(snapErr)
diff --git a/lxd/db/snapshots.go b/lxd/db/snapshots.go
index 3b7b412e3a..1c966da56d 100644
--- a/lxd/db/snapshots.go
+++ b/lxd/db/snapshots.go
@@ -93,9 +93,9 @@ func (c *ClusterTx) UpdateInstanceSnapshotConfig(id int, values map[string]strin
return c.configUpdate(id, values, insertSQL, deleteSQL)
}
-// InstanceSnapshotUpdate updates the description and expiry date of the
+// UpdateInstanceSnapshot updates the description and expiry date of the
// instance snapshot with the given ID.
-func InstanceSnapshotUpdate(tx *sql.Tx, id int, description string, expiryDate time.Time) error {
+func UpdateInstanceSnapshot(tx *sql.Tx, id int, description string, expiryDate time.Time) error {
str := fmt.Sprintf("UPDATE instances_snapshots SET description=?, expiry_date=? WHERE id=?")
stmt, err := tx.Prepare(str)
if err != nil {
@@ -115,8 +115,8 @@ func InstanceSnapshotUpdate(tx *sql.Tx, id int, description string, expiryDate t
return nil
}
-// InstanceSnapshotID returns the ID of the snapshot with the given name.
-func (c *Cluster) InstanceSnapshotID(project, instance, name string) (int, error) {
+// GetInstanceSnapshotID returns the ID of the snapshot with the given name.
+func (c *Cluster) GetInstanceSnapshotID(project, instance, name string) (int, error) {
var id int64
err := c.Transaction(func(tx *ClusterTx) error {
var err error
diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index 688b4caea8..1cffc34840 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -4360,7 +4360,7 @@ func (c *lxc) Update(args db.InstanceArgs, userRequested bool) error {
// Snapshots should update only their descriptions and expiry date.
if c.IsSnapshot() {
- err = db.InstanceSnapshotUpdate(tx, c.id, c.description, c.expiryDate)
+ err = db.UpdateInstanceSnapshot(tx, c.id, c.description, c.expiryDate)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "Snapshot update")
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 53a57fd090..130a5f4f87 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -2605,7 +2605,7 @@ func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
// Snapshots should update only their descriptions and expiry date.
if vm.IsSnapshot() {
- err = db.InstanceSnapshotUpdate(tx, vm.id, vm.description, vm.expiryDate)
+ err = db.UpdateInstanceSnapshot(tx, vm.id, vm.description, vm.expiryDate)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "Snapshot update")
diff --git a/lxd/instance_snapshot.go b/lxd/instance_snapshot.go
index 18865f675b..557ebfa91f 100644
--- a/lxd/instance_snapshot.go
+++ b/lxd/instance_snapshot.go
@@ -408,7 +408,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc instance.Instance, containerNam
fullName := containerName + shared.SnapshotDelimiter + newName
// Check that the name isn't already in use
- id, _ := d.cluster.InstanceSnapshotID(sc.Project(), containerName, newName)
+ id, _ := d.cluster.GetInstanceSnapshotID(sc.Project(), containerName, newName)
if id > 0 {
return response.Conflict(fmt.Errorf("Name '%s' already in use", fullName))
}
From d48f0426a721abbe0501e53c29469b2d30ea9879 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:10:10 +0100
Subject: [PATCH 14/43] lxd/db: Rename StoragePoolsNodeConfig to
GetStoragePoolsLocalConfig
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_cluster.go | 2 +-
lxd/cluster/membership.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_pools_test.go | 6 +++---
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index cbf672bead..06cd93e019 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -125,7 +125,7 @@ func clusterGetMemberConfig(cluster *db.Cluster) ([]api.ClusterMemberConfigKey,
err := cluster.Transaction(func(tx *db.ClusterTx) error {
var err error
- pools, err = tx.StoragePoolsNodeConfig()
+ pools, err = tx.GetStoragePoolsLocalConfig()
if err != nil {
return errors.Wrapf(err, "Failed to fetch storage pools configuration")
}
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 868b92baa8..788ac29cf0 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -281,7 +281,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
var operations []db.Operation
err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
- pools, err = tx.StoragePoolsNodeConfig()
+ pools, err = tx.GetStoragePoolsLocalConfig()
if err != nil {
return err
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index df6f499e74..c5eedc8af5 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -14,9 +14,9 @@ import (
"github.com/lxc/lxd/shared/api"
)
-// StoragePoolsNodeConfig returns a map associating each storage pool name to
+// GetStoragePoolsLocalConfig returns a map associating each storage pool name to
// its node-specific config values (i.e. the ones where node_id is not NULL).
-func (c *ClusterTx) StoragePoolsNodeConfig() (map[string]map[string]string, error) {
+func (c *ClusterTx) GetStoragePoolsLocalConfig() (map[string]map[string]string, error) {
names, err := query.SelectStrings(c.tx, "SELECT name FROM storage_pools")
if err != nil {
return nil, err
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
index 90fb03fe58..9ca2a18e2d 100644
--- a/lxd/db/storage_pools_test.go
+++ b/lxd/db/storage_pools_test.go
@@ -11,8 +11,8 @@ import (
"github.com/stretchr/testify/require"
)
-// The StoragePoolsNodeConfigs method returns only node-specific config values.
-func TestStoragePoolsNodeConfigs(t *testing.T) {
+// The GetStoragePoolsLocalConfig method returns only node-specific config values.
+func TestGetStoragePoolsLocalConfigs(t *testing.T) {
cluster, cleanup := db.NewTestCluster(t)
defer cleanup()
@@ -39,7 +39,7 @@ func TestStoragePoolsNodeConfigs(t *testing.T) {
err = cluster.Transaction(func(tx *db.ClusterTx) error {
var err error
- config, err = tx.StoragePoolsNodeConfig()
+ config, err = tx.GetStoragePoolsLocalConfig()
return err
})
require.NoError(t, err)
From d8aaa8f0fcba00d9e57c16b4b639a6619a7564ce Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:11:16 +0100
Subject: [PATCH 15/43] lxd/db: Rename StoragePoolID to GetStoragePoolID
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/containers.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_pools_test.go | 4 ++--
lxd/db/storage_volumes.go | 2 +-
lxd/storage_pools.go | 2 +-
5 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 6a5d22a412..1cb8ff9ee8 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -451,7 +451,7 @@ func (c *ClusterTx) UpdateInstanceNode(project, oldName, newName, newNode string
return errors.Wrap(err, "Failed to get instance's storage pool name")
}
- poolID, err := c.StoragePoolID(poolName)
+ poolID, err := c.GetStoragePoolID(poolName)
if err != nil {
return errors.Wrap(err, "Failed to get instance's storage pool ID")
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index c5eedc8af5..3b1f9fca08 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -37,8 +37,8 @@ storage_pools_config JOIN storage_pools ON storage_pools.id=storage_pools_config
return pools, nil
}
-// StoragePoolID returns the ID of the pool with the given name.
-func (c *ClusterTx) StoragePoolID(name string) (int64, error) {
+// GetStoragePoolID returns the ID of the pool with the given name.
+func (c *ClusterTx) GetStoragePoolID(name string) (int64, error) {
stmt := "SELECT id FROM storage_pools WHERE name=?"
ids, err := query.SelectIntegers(c.tx, stmt, name)
if err != nil {
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
index 9ca2a18e2d..4dacd49335 100644
--- a/lxd/db/storage_pools_test.go
+++ b/lxd/db/storage_pools_test.go
@@ -62,7 +62,7 @@ func TestStoragePoolsCreatePending(t *testing.T) {
err = tx.StoragePoolCreatePending("buzz", "pool1", "dir", config)
require.NoError(t, err)
- poolID, err := tx.StoragePoolID("pool1")
+ poolID, err := tx.GetStoragePoolID("pool1")
require.NoError(t, err)
assert.True(t, poolID > 0)
@@ -109,7 +109,7 @@ func TestStoragePoolsCreatePending_OtherPool(t *testing.T) {
err = tx.StoragePoolCreatePending("none", "pool2", "dir", config)
require.NoError(t, err)
- poolID, err := tx.StoragePoolID("pool2")
+ poolID, err := tx.GetStoragePoolID("pool2")
require.NoError(t, err)
config = map[string]string{}
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index aca38b49dc..ed153dd8b6 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -214,7 +214,7 @@ func (c *Cluster) StorageVolumeIsAvailable(pool, volume string) (bool, error) {
isAvailable := false
err := c.Transaction(func(tx *ClusterTx) error {
- id, err := tx.StoragePoolID(pool)
+ id, err := tx.GetStoragePoolID(pool)
if err != nil {
return errors.Wrapf(err, "Fetch storage pool ID for %q", pool)
}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index a7bc12a7dd..810fd567a0 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -199,7 +199,7 @@ func storagePoolsPostCluster(d *Daemon, req api.StoragePoolsPost) error {
var err error
// Check that the pool was defined at all.
- poolID, err = tx.StoragePoolID(req.Name)
+ poolID, err = tx.GetStoragePoolID(req.Name)
if err != nil {
return err
}
From 23cd40681e1ae486b56f121e3416502e27fb261c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:12:32 +0100
Subject: [PATCH 16/43] lxd/db: Rename StoragePoolDriver to
GetStoragePoolDriver
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/cluster/connect.go | 2 +-
lxd/cluster/membership.go | 2 +-
lxd/db/containers.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_volumes.go | 2 +-
5 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/lxd/cluster/connect.go b/lxd/cluster/connect.go
index 7d12dd6099..ef45505957 100644
--- a/lxd/cluster/connect.go
+++ b/lxd/cluster/connect.go
@@ -104,7 +104,7 @@ func ConnectIfVolumeIsRemote(cluster *db.Cluster, poolID int64, volumeName strin
var driver string
err := cluster.Transaction(func(tx *db.ClusterTx) error {
var err error
- driver, err = tx.StoragePoolDriver(poolID)
+ driver, err = tx.GetStoragePoolDriver(poolID)
return err
})
if err != nil {
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 788ac29cf0..00f94822d1 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -386,7 +386,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
return errors.Wrap(err, "failed to add joining node's to the pool")
}
- driver, err := tx.StoragePoolDriver(id)
+ driver, err := tx.GetStoragePoolDriver(id)
if err != nil {
return errors.Wrap(err, "failed to get storage pool driver")
}
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 1cb8ff9ee8..b7e526f643 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -456,7 +456,7 @@ func (c *ClusterTx) UpdateInstanceNode(project, oldName, newName, newNode string
return errors.Wrap(err, "Failed to get instance's storage pool ID")
}
- poolDriver, err := c.StoragePoolDriver(poolID)
+ poolDriver, err := c.GetStoragePoolDriver(poolID)
if err != nil {
return errors.Wrap(err, "Failed to get instance's storage pool driver")
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 3b1f9fca08..17f9aefd17 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -54,8 +54,8 @@ func (c *ClusterTx) GetStoragePoolID(name string) (int64, error) {
}
}
-// StoragePoolDriver returns the driver of the pool with the given ID.
-func (c *ClusterTx) StoragePoolDriver(id int64) (string, error) {
+// GetStoragePoolDriver returns the driver of the pool with the given ID.
+func (c *ClusterTx) GetStoragePoolDriver(id int64) (string, error) {
stmt := "SELECT driver FROM storage_pools WHERE id=?"
drivers, err := query.SelectStrings(c.tx, stmt, id)
if err != nil {
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index ed153dd8b6..c1aba1c0a6 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -219,7 +219,7 @@ func (c *Cluster) StorageVolumeIsAvailable(pool, volume string) (bool, error) {
return errors.Wrapf(err, "Fetch storage pool ID for %q", pool)
}
- driver, err := tx.StoragePoolDriver(id)
+ driver, err := tx.GetStoragePoolDriver(id)
if err != nil {
return errors.Wrapf(err, "Fetch storage pool driver for %q", pool)
}
From fd443348635b24f67ae3edee38332c4cbb8732c0 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:13:43 +0100
Subject: [PATCH 17/43] lxd/db: Rename StoragePoolIDsNotPending to
GetNonPendingStoragePoolsNamesToIDs
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/cluster/membership.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 00f94822d1..8d83af797f 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -376,7 +376,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
tx.NodeID(node.ID)
// Storage pools.
- ids, err := tx.StoragePoolIDsNotPending()
+ ids, err := tx.GetNonPendingStoragePoolsNamesToIDs()
if err != nil {
return errors.Wrap(err, "failed to get cluster storage pool IDs")
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 17f9aefd17..25a94fdd36 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -71,10 +71,10 @@ func (c *ClusterTx) GetStoragePoolDriver(id int64) (string, error) {
}
}
-// StoragePoolIDsNotPending returns a map associating each storage pool name to its ID.
+// GetNonPendingStoragePoolsNamesToIDs returns a map associating each storage pool name to its ID.
//
// Pending storage pools are skipped.
-func (c *ClusterTx) StoragePoolIDsNotPending() (map[string]int64, error) {
+func (c *ClusterTx) GetNonPendingStoragePoolsNamesToIDs() (map[string]int64, error) {
pools := []struct {
id int64
name string
From 03b5f1b35d78e269c26cb9cebd63a717c76d4989 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:16:32 +0100
Subject: [PATCH 18/43] lxd/db: Rename StoragePoolNodeJoin to
UpdateStoragePoolAfterNodeJoin
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/cluster/membership.go | 4 ++--
lxd/db/storage_pools.go | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 8d83af797f..dcef436162 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -381,7 +381,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
return errors.Wrap(err, "failed to get cluster storage pool IDs")
}
for name, id := range ids {
- err := tx.StoragePoolNodeJoin(id, node.ID)
+ err := tx.UpdateStoragePoolAfterNodeJoin(id, node.ID)
if err != nil {
return errors.Wrap(err, "failed to add joining node's to the pool")
}
@@ -394,7 +394,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
if shared.StringInSlice(driver, []string{"ceph", "cephfs"}) {
// For ceph pools we have to create volume
// entries for the joining node.
- err := tx.StoragePoolNodeJoinCeph(id, node.ID)
+ err := tx.UpdateCephStoragePoolAfterNodeJoin(id, node.ID)
if err != nil {
return errors.Wrap(err, "failed to create ceph volumes for joining node")
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 25a94fdd36..4c239a7995 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -103,12 +103,12 @@ func (c *ClusterTx) GetNonPendingStoragePoolsNamesToIDs() (map[string]int64, err
return ids, nil
}
-// StoragePoolNodeJoin adds a new entry in the storage_pools_nodes table.
+// UpdateStoragePoolAfterNodeJoin adds a new entry in the storage_pools_nodes table.
//
// It should only be used when a new node joins the cluster, when it's safe to
// assume that the relevant pool has already been created on the joining node,
// and we just need to track it.
-func (c *ClusterTx) StoragePoolNodeJoin(poolID, nodeID int64) error {
+func (c *ClusterTx) UpdateStoragePoolAfterNodeJoin(poolID, nodeID int64) error {
columns := []string{"storage_pool_id", "node_id"}
values := []interface{}{poolID, nodeID}
_, err := query.UpsertObject(c.tx, "storage_pools_nodes", columns, values)
@@ -119,9 +119,9 @@ func (c *ClusterTx) StoragePoolNodeJoin(poolID, nodeID int64) error {
return nil
}
-// StoragePoolNodeJoinCeph updates internal state to reflect that nodeID is
+// UpdateCephStoragePoolAfterNodeJoin updates internal state to reflect that nodeID is
// joining a cluster where poolID is a ceph pool.
-func (c *ClusterTx) StoragePoolNodeJoinCeph(poolID, nodeID int64) error {
+func (c *ClusterTx) UpdateCephStoragePoolAfterNodeJoin(poolID, nodeID int64) error {
// Get the IDs of the other nodes (they should be all linked to
// the pool).
stmt := "SELECT node_id FROM storage_pools_nodes WHERE storage_pool_id=?"
From 8ac3dabc85f475c4ce63968523097490ea415e1d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:17:22 +0100
Subject: [PATCH 19/43] lxd/db: Rename StoragePoolConfigAdd to
CreateStoragePoolConfig
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/cluster/membership.go | 2 +-
lxd/db/storage_pools.go | 6 +++---
lxd/storage_pools.go | 2 +-
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index dcef436162..0013310b6b 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -404,7 +404,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
if !ok {
return fmt.Errorf("joining node has no config for pool %s", name)
}
- err = tx.StoragePoolConfigAdd(id, node.ID, config)
+ err = tx.CreateStoragePoolConfig(id, node.ID, config)
if err != nil {
return errors.Wrap(err, "failed to add joining node's pool config")
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 4c239a7995..0416f0fc3c 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -220,8 +220,8 @@ SELECT ?, key, value
return nil
}
-// StoragePoolConfigAdd adds a new entry in the storage_pools_config table
-func (c *ClusterTx) StoragePoolConfigAdd(poolID, nodeID int64, config map[string]string) error {
+// CreateStoragePoolConfig adds a new entry in the storage_pools_config table
+func (c *ClusterTx) CreateStoragePoolConfig(poolID, nodeID int64, config map[string]string) error {
return storagePoolConfigAdd(c.tx, poolID, nodeID, config)
}
@@ -308,7 +308,7 @@ func (c *ClusterTx) StoragePoolCreatePending(node, name, driver string, conf map
if err != nil {
return err
}
- err = c.StoragePoolConfigAdd(poolID, nodeInfo.ID, conf)
+ err = c.CreateStoragePoolConfig(poolID, nodeInfo.ID, conf)
if err != nil {
return err
}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 810fd567a0..49c18a88dd 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -217,7 +217,7 @@ func storagePoolsPostCluster(d *Daemon, req api.StoragePoolsPost) error {
}
// Insert the global config keys.
- return tx.StoragePoolConfigAdd(poolID, 0, req.Config)
+ return tx.CreateStoragePoolConfig(poolID, 0, req.Config)
})
if err != nil {
if err == db.ErrNoSuchObject {
From dcf2e8bb4ea43b82e252ac029ac473da2269d4a1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:18:59 +0100
Subject: [PATCH 20/43] lxd/db: Rename StoragePoolNodeConfigs to
GetStoragePoolNodeConfigs
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_pools_test.go | 6 +++---
lxd/storage_pools.go | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 0416f0fc3c..33e6d7c309 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -342,11 +342,11 @@ func (c *ClusterTx) storagePoolState(name string, state int) error {
return nil
}
-// StoragePoolNodeConfigs returns the node-specific configuration of all
+// GetStoragePoolNodeConfigs returns the node-specific configuration of all
// nodes grouped by node name, for the given poolID.
//
// If the storage pool is not defined on all nodes, an error is returned.
-func (c *ClusterTx) StoragePoolNodeConfigs(poolID int64) (map[string]map[string]string, error) {
+func (c *ClusterTx) GetStoragePoolNodeConfigs(poolID int64) (map[string]map[string]string, error) {
// Fetch all nodes.
nodes, err := c.GetNodes()
if err != nil {
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
index 4dacd49335..4a1abc3283 100644
--- a/lxd/db/storage_pools_test.go
+++ b/lxd/db/storage_pools_test.go
@@ -71,7 +71,7 @@ func TestStoragePoolsCreatePending(t *testing.T) {
require.NoError(t, err)
// The initial node (whose name is 'none' by default) is missing.
- _, err = tx.StoragePoolNodeConfigs(poolID)
+ _, err = tx.GetStoragePoolNodeConfigs(poolID)
require.EqualError(t, err, "Pool not defined on nodes: none")
config = map[string]string{"source": "/egg"}
@@ -79,7 +79,7 @@ func TestStoragePoolsCreatePending(t *testing.T) {
require.NoError(t, err)
// Now the storage is defined on all nodes.
- configs, err := tx.StoragePoolNodeConfigs(poolID)
+ configs, err := tx.GetStoragePoolNodeConfigs(poolID)
require.NoError(t, err)
assert.Len(t, configs, 3)
assert.Equal(t, map[string]string{"source": "/foo"}, configs["buzz"])
@@ -118,7 +118,7 @@ func TestStoragePoolsCreatePending_OtherPool(t *testing.T) {
// The node-level configs of the second pool do not contain any key
// from the first pool.
- configs, err := tx.StoragePoolNodeConfigs(poolID)
+ configs, err := tx.GetStoragePoolNodeConfigs(poolID)
require.NoError(t, err)
assert.Len(t, configs, 2)
assert.Equal(t, map[string]string{}, configs["none"])
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 49c18a88dd..e1ae175885 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -205,7 +205,7 @@ func storagePoolsPostCluster(d *Daemon, req api.StoragePoolsPost) error {
}
// Fetch the node-specific configs.
- configs, err = tx.StoragePoolNodeConfigs(poolID)
+ configs, err = tx.GetStoragePoolNodeConfigs(poolID)
if err != nil {
return err
}
From 425a4d2c0316b84690b36568351872e739f88d02 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:21:18 +0100
Subject: [PATCH 21/43] lxd/db: Rename StoragePools to GetStoragePoolNames
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_cluster.go | 6 +++---
lxd/db/migration_test.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
lxd/instances_post.go | 4 ++--
lxd/patches.go | 36 ++++++++++++++++-----------------
lxd/storage_pools.go | 2 +-
lxd/storage_volumes_snapshot.go | 2 +-
7 files changed, 28 insertions(+), 28 deletions(-)
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 06cd93e019..584e962394 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -413,7 +413,7 @@ func clusterPutJoin(d *Daemon, req api.ClusterPut) response.Response {
// Get all defined storage pools and networks, so they can be compared
// to the ones in the cluster.
pools := []api.StoragePool{}
- poolNames, err := d.cluster.StoragePools()
+ poolNames, err := d.cluster.GetStoragePoolNames()
if err != nil && err != db.ErrNoSuchObject {
return err
}
@@ -499,7 +499,7 @@ func clusterPutJoin(d *Daemon, req api.ClusterPut) response.Response {
}
// For ceph pools we have to trigger the local mountpoint creation too.
- poolNames, err = d.cluster.StoragePools()
+ poolNames, err = d.cluster.GetStoragePoolNames()
if err != nil && err != db.ErrNoSuchObject {
return err
}
@@ -1048,7 +1048,7 @@ func clusterNodeDelete(d *Daemon, r *http.Request) response.Response {
}
// Delete all the pools on this node
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil && err != db.ErrNoSuchObject {
return response.SmartError(err)
}
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index b6bb7294aa..4637027246 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -92,7 +92,7 @@ func TestImportPreClusteringData(t *testing.T) {
assert.Equal(t, []string{"none"}, network.Locations)
// storage
- pools, err := cluster.StoragePools()
+ pools, err := cluster.GetStoragePoolNames()
require.NoError(t, err)
assert.Equal(t, []string{"default"}, pools)
id, pool, err := cluster.StoragePoolGet("default")
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 33e6d7c309..62c4ef6558 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -390,8 +390,8 @@ WHERE storage_pools.id = ? AND storage_pools.state = ?
return configs, nil
}
-// StoragePools returns the names of all storage pools.
-func (c *Cluster) StoragePools() ([]string, error) {
+// GetStoragePoolNames returns the names of all storage pools.
+func (c *Cluster) GetStoragePoolNames() ([]string, error) {
return c.storagePools("")
}
diff --git a/lxd/instances_post.go b/lxd/instances_post.go
index 34a61cd55c..4a5ff6227d 100644
--- a/lxd/instances_post.go
+++ b/lxd/instances_post.go
@@ -792,7 +792,7 @@ func containersPost(d *Daemon, r *http.Request) response.Response {
}
// If no storage pool is found, error out.
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil || len(pools) == 0 {
return response.BadRequest(fmt.Errorf("No storage pool found. Please create a new storage pool"))
}
@@ -933,7 +933,7 @@ func containerFindStoragePool(d *Daemon, project string, req *api.InstancesPost)
// If there is just a single pool in the database, use that
if storagePool == "" {
logger.Debugf("No valid storage pool in the container's local root disk device and profiles found")
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil {
if err == db.ErrNoSuchObject {
return "", "", "", nil, response.BadRequest(fmt.Errorf("This LXD instance does not have any storage pools configured"))
diff --git a/lxd/patches.go b/lxd/patches.go
index 4fa543c840..a38e7de3be 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -201,7 +201,7 @@ func patchNetworkPIDFiles(name string, d *Daemon) error {
func patchGenericStorage(name string, d *Daemon) error {
// Load all the pools.
- pools, _ := d.cluster.StoragePools()
+ pools, _ := d.cluster.GetStoragePoolNames()
for _, poolName := range pools {
pool, err := storagePools.GetPoolByName(d.State(), poolName)
@@ -222,7 +222,7 @@ func patchGenericStorage(name string, d *Daemon) error {
func patchRenameCustomVolumeLVs(name string, d *Daemon) error {
// Ignore the error since it will also fail if there are no pools.
- pools, _ := d.cluster.StoragePools()
+ pools, _ := d.cluster.GetStoragePoolNames()
for _, poolName := range pools {
poolID, pool, err := d.cluster.StoragePoolGet(poolName)
@@ -485,7 +485,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
}
var poolID int64
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err == nil { // Already exist valid storage pools.
// Check if the storage pool already has a db entry.
if shared.StringInSlice(defaultPoolName, pools) {
@@ -785,7 +785,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
}
var poolID int64
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err == nil { // Already exist valid storage pools.
// Check if the storage pool already has a db entry.
if shared.StringInSlice(defaultPoolName, pools) {
@@ -1087,7 +1087,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
// are already configured. If so, we can assume that a partial upgrade
// has been performed and can skip the next steps.
var poolID int64
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err == nil { // Already exist valid storage pools.
// Check if the storage pool already has a db entry.
if shared.StringInSlice(defaultPoolName, pools) {
@@ -1605,7 +1605,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
// are already configured. If so, we can assume that a partial upgrade
// has been performed and can skip the next steps.
var poolID int64
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err == nil { // Already exist valid storage pools.
// Check if the storage pool already has a db entry.
if shared.StringInSlice(poolName, pools) {
@@ -2047,7 +2047,7 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
}
func patchStorageApiV1(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil && err == db.ErrNoSuchObject {
// No pool was configured in the previous update. So we're on a
// pristine LXD instance.
@@ -2191,7 +2191,7 @@ func patchStorageApiLvmKeys(name string, d *Daemon) error {
}
func patchStorageApiKeys(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil && err == db.ErrNoSuchObject {
// No pool was configured in the previous update. So we're on a
// pristine LXD instance.
@@ -2248,7 +2248,7 @@ func patchStorageApiKeys(name string, d *Daemon) error {
// In case any of the objects images/containers/snapshots are missing storage
// volume configuration entries, let's add the defaults.
func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil {
if err == db.ErrNoSuchObject {
return nil
@@ -2396,7 +2396,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
}
func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil {
if err == db.ErrNoSuchObject {
return nil
@@ -2453,7 +2453,7 @@ func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
}
func patchStorageApiDetectLVSize(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil {
if err == db.ErrNoSuchObject {
return nil
@@ -2548,7 +2548,7 @@ func patchStorageApiInsertZfsDriver(name string, d *Daemon) error {
}
func patchStorageZFSnoauto(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil {
if err == db.ErrNoSuchObject {
return nil
@@ -2611,7 +2611,7 @@ func patchStorageZFSnoauto(name string, d *Daemon) error {
}
func patchStorageZFSVolumeSize(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil && err == db.ErrNoSuchObject {
// No pool was configured in the previous update. So we're on a
// pristine LXD instance.
@@ -2697,7 +2697,7 @@ func patchNetworkDnsmasqHosts(name string, d *Daemon) error {
}
func patchStorageApiDirBindMount(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil && err == db.ErrNoSuchObject {
// No pool was configured in the previous update. So we're on a
// pristine LXD instance.
@@ -2782,7 +2782,7 @@ func patchFixUploadedAt(name string, d *Daemon) error {
}
func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil && err == db.ErrNoSuchObject {
// No pool was configured in the previous update. So we're on a
// pristine LXD instance.
@@ -2979,7 +2979,7 @@ func patchStorageApiPermissions(name string, d *Daemon) error {
return err
}
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil && err == db.ErrNoSuchObject {
// No pool was configured in the previous update. So we're on a
// pristine LXD instance.
@@ -3140,7 +3140,7 @@ func patchCandidConfigKey(name string, d *Daemon) error {
func patchMoveBackups(name string, d *Daemon) error {
// Get all storage pools
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil {
if err == db.ErrNoSuchObject {
return nil
@@ -3258,7 +3258,7 @@ func patchMoveBackups(name string, d *Daemon) error {
}
func patchStorageApiRenameContainerSnapshotsDir(name string, d *Daemon) error {
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil && err == db.ErrNoSuchObject {
// No pool was configured so we're on a pristine LXD instance.
return nil
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index e1ae175885..c1586c584c 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -44,7 +44,7 @@ var storagePoolCmd = APIEndpoint{
func storagePoolsGet(d *Daemon, r *http.Request) response.Response {
recursion := util.IsRecursionRequest(r)
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil && err != db.ErrNoSuchObject {
return response.SmartError(err)
}
diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index 6fec3c29fb..875ae408f7 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -783,7 +783,7 @@ func volumeDetermineNextSnapshotName(d *Daemon, volume db.StorageVolumeArgs, def
return "", err
}
- pools, err := d.cluster.StoragePools()
+ pools, err := d.cluster.GetStoragePoolNames()
if err != nil {
return "", err
}
From 21d6bc9c48a91834227bccdefb9a8245447895b5 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:22:15 +0100
Subject: [PATCH 22/43] lxd/db: Rename StoragePoolsNotPending to
GetNonPendingStoragePoolNames
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_cluster.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
lxd/storage.go | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 584e962394..b179a0a0b4 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -1421,7 +1421,7 @@ type internalClusterPostHandoverRequest struct {
}
func clusterCheckStoragePoolsMatch(cluster *db.Cluster, reqPools []api.StoragePool) error {
- poolNames, err := cluster.StoragePoolsNotPending()
+ poolNames, err := cluster.GetNonPendingStoragePoolNames()
if err != nil && err != db.ErrNoSuchObject {
return err
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 62c4ef6558..17c5e68659 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -395,9 +395,9 @@ func (c *Cluster) GetStoragePoolNames() ([]string, error) {
return c.storagePools("")
}
-// StoragePoolsNotPending returns the names of all storage pools that are not
+// GetNonPendingStoragePoolNames returns the names of all storage pools that are not
// pending.
-func (c *Cluster) StoragePoolsNotPending() ([]string, error) {
+func (c *Cluster) GetNonPendingStoragePoolNames() ([]string, error) {
return c.storagePools("NOT state=?", storagePoolPending)
}
diff --git a/lxd/storage.go b/lxd/storage.go
index 8ec8f512d3..cf5c19a5fd 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -65,7 +65,7 @@ func resetContainerDiskIdmap(container instance.Container, srcIdmap *idmap.Idmap
}
func setupStorageDriver(s *state.State, forceCheck bool) error {
- pools, err := s.Cluster.StoragePoolsNotPending()
+ pools, err := s.Cluster.GetNonPendingStoragePoolNames()
if err != nil {
if err == db.ErrNoSuchObject {
logger.Debugf("No existing storage pools detected")
From aa2eda694c8916efaf2e4c6cece575b6ddcffc2d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:25:04 +0100
Subject: [PATCH 23/43] lxd/db: Rename StoragePoolsGetDrivers to
GetStoragePoolDrivers
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 6 +++---
lxd/storage.go | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 17c5e68659..bddfaa3eaf 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -432,9 +432,9 @@ func (c *Cluster) storagePools(where string, args ...interface{}) ([]string, err
return pools, nil
}
-// StoragePoolsGetDrivers returns the names of all storage volumes attached to
-// a given storage pool.
-func (c *Cluster) StoragePoolsGetDrivers() ([]string, error) {
+// GetStoragePoolDrivers returns the names of all storage drivers currently
+// being used by at least one storage pool.
+func (c *Cluster) GetStoragePoolDrivers() ([]string, error) {
var poolDriver string
query := "SELECT DISTINCT driver FROM storage_pools"
inargs := []interface{}{}
diff --git a/lxd/storage.go b/lxd/storage.go
index cf5c19a5fd..0b627bc2ea 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -125,7 +125,7 @@ func storagePoolDriversCacheUpdate(s *state.State) {
// appropriate. (Should be cheaper then querying the db all the time,
// especially if we keep adding more storage drivers.)
- drivers, err := s.Cluster.StoragePoolsGetDrivers()
+ drivers, err := s.Cluster.GetStoragePoolDrivers()
if err != nil && err != db.ErrNoSuchObject {
return
}
From bcab60e7507231d53d50384befbb66b0104cca11 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:26:24 +0100
Subject: [PATCH 24/43] lxd/db: Rename StoragePoolGetID to GetStoragePoolID
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/daemon_images.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
lxd/device/disk.go | 2 +-
lxd/instances_post.go | 2 +-
lxd/migrate_storage_volumes.go | 2 +-
lxd/patches.go | 2 +-
lxd/storage/utils.go | 2 +-
lxd/storage_pools.go | 4 ++--
lxd/storage_pools_utils.go | 2 +-
lxd/storage_volumes.go | 14 +++++++-------
lxd/storage_volumes_snapshot.go | 8 ++++----
11 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index 3458bdf586..b194ae1149 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -143,7 +143,7 @@ func (d *Daemon) ImageDownload(op *operations.Operation, server string, protocol
}
// Get the ID of the target storage pool
- poolID, err := d.cluster.StoragePoolGetID(storagePool)
+ poolID, err := d.cluster.GetStoragePoolID(storagePool)
if err != nil {
return nil, err
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index bddfaa3eaf..a5d1596dc2 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -457,8 +457,8 @@ func (c *Cluster) GetStoragePoolDrivers() ([]string, error) {
return drivers, nil
}
-// StoragePoolGetID returns the id of a single storage pool.
-func (c *Cluster) StoragePoolGetID(poolName string) (int64, error) {
+// GetStoragePoolID returns the id of a single storage pool.
+func (c *Cluster) GetStoragePoolID(poolName string) (int64, error) {
poolID := int64(-1)
query := "SELECT id FROM storage_pools WHERE name=?"
inargs := []interface{}{poolName}
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index 39a4ec9401..133a215294 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -164,7 +164,7 @@ func (d *disk) validateConfig(instConf instance.ConfigReader) error {
return fmt.Errorf("Storage volumes cannot be specified as absolute paths")
}
- _, err := d.state.Cluster.StoragePoolGetID(d.config["pool"])
+ _, err := d.state.Cluster.GetStoragePoolID(d.config["pool"])
if err != nil {
return fmt.Errorf("The %q storage pool doesn't exist", d.config["pool"])
}
diff --git a/lxd/instances_post.go b/lxd/instances_post.go
index 4a5ff6227d..89a34687f5 100644
--- a/lxd/instances_post.go
+++ b/lxd/instances_post.go
@@ -904,7 +904,7 @@ func containerFindStoragePool(d *Daemon, project string, req *api.InstancesPost)
// Handle copying/moving between two storage-api LXD instances.
if storagePool != "" {
- _, err := d.cluster.StoragePoolGetID(storagePool)
+ _, err := d.cluster.GetStoragePoolID(storagePool)
if err == db.ErrNoSuchObject {
storagePool = ""
// Unset the local root disk device storage pool if not
diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index d02e1a24ba..4ee81d49b8 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -69,7 +69,7 @@ func (s *migrationSourceWs) DoStorage(state *state.State, projectName string, po
var err error
snaps, err := storagePools.VolumeSnapshotsGet(state, projectName, poolName, volName, db.StoragePoolVolumeTypeCustom)
if err == nil {
- poolID, err := state.Cluster.StoragePoolGetID(poolName)
+ poolID, err := state.Cluster.GetStoragePoolID(poolName)
if err == nil {
for _, snap := range snaps {
_, snapVolume, err := state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, snap.Name, db.StoragePoolVolumeTypeCustom, poolID)
diff --git a/lxd/patches.go b/lxd/patches.go
index a38e7de3be..4a306548ed 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -3042,7 +3042,7 @@ func patchStorageApiPermissions(name string, d *Daemon) error {
// Retrieve ID of the storage pool (and check if the storage pool
// exists).
- poolID, err := d.cluster.StoragePoolGetID(poolName)
+ poolID, err := d.cluster.GetStoragePoolID(poolName)
if err != nil && !os.IsNotExist(err) {
return err
}
diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index e7f55e3fdd..62f6016b81 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -307,7 +307,7 @@ func VolumeFillDefault(name string, config map[string]string, parentPool *api.St
// VolumeSnapshotsGet returns a list of snapshots of the form <volume>/<snapshot-name>.
func VolumeSnapshotsGet(s *state.State, projectName string, pool string, volume string, volType int) ([]db.StorageVolumeArgs, error) {
- poolID, err := s.Cluster.StoragePoolGetID(pool)
+ poolID, err := s.Cluster.GetStoragePoolID(pool)
if err != nil {
return nil, err
}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index c1586c584c..17752986ee 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -117,7 +117,7 @@ func storagePoolsPost(d *Daemon, r *http.Request) response.Response {
return response.BadRequest(err)
}
- poolID, err := d.cluster.StoragePoolGetID(req.Name)
+ poolID, err := d.cluster.GetStoragePoolID(req.Name)
if err != nil {
return response.NotFound(err)
}
@@ -531,7 +531,7 @@ func storagePoolClusterFillWithNodeConfig(dbConfig, reqConfig map[string]string)
func storagePoolDelete(d *Daemon, r *http.Request) response.Response {
poolName := mux.Vars(r)["name"]
- poolID, err := d.cluster.StoragePoolGetID(poolName)
+ poolID, err := d.cluster.GetStoragePoolID(poolName)
if err != nil {
return response.NotFound(err)
}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 4a14d1855e..8076c2712b 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -113,7 +113,7 @@ func profilesUsingPoolGetNames(db *db.Cluster, project string, poolName string)
// storagePoolDBCreate creates a storage pool DB entry and returns the created Pool ID.
func storagePoolDBCreate(s *state.State, poolName, poolDescription string, driver string, config map[string]string) (int64, error) {
// Check that the storage pool does not already exist.
- _, err := s.Cluster.StoragePoolGetID(poolName)
+ _, err := s.Cluster.GetStoragePoolID(poolName)
if err == nil {
return -1, fmt.Errorf("The storage pool already exists")
}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index a6a2ec8fce..86146d8e6d 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -90,7 +90,7 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) response.Response {
recursion := util.IsRecursionRequest(r)
// Retrieve ID of the storage pool (and check if the storage pool exists).
- poolID, err := d.cluster.StoragePoolGetID(poolName)
+ poolID, err := d.cluster.GetStoragePoolID(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -207,7 +207,7 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) response.Response {
}
// Retrieve ID of the storage pool (and check if the storage pool exists).
- poolID, err := d.cluster.StoragePoolGetID(poolName)
+ poolID, err := d.cluster.GetStoragePoolID(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -298,7 +298,7 @@ func storagePoolVolumesTypePost(d *Daemon, r *http.Request) response.Response {
}
poolName := mux.Vars(r)["name"]
- poolID, err := d.cluster.StoragePoolGetID(poolName)
+ poolID, err := d.cluster.GetStoragePoolID(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -403,7 +403,7 @@ func storagePoolVolumesPost(d *Daemon, r *http.Request) response.Response {
}
poolName := mux.Vars(r)["name"]
- poolID, err := d.cluster.StoragePoolGetID(poolName)
+ poolID, err := d.cluster.GetStoragePoolID(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -555,9 +555,9 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string
// Retrieve ID of the storage pool (and check if the storage pool exists).
var poolID int64
if req.Pool != "" {
- poolID, err = d.cluster.StoragePoolGetID(req.Pool)
+ poolID, err = d.cluster.GetStoragePoolID(req.Pool)
} else {
- poolID, err = d.cluster.StoragePoolGetID(poolName)
+ poolID, err = d.cluster.GetStoragePoolID(poolName)
}
if err != nil {
return response.SmartError(err)
@@ -796,7 +796,7 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request, volumeTypeName string)
}
// Get the ID of the storage pool the storage volume is supposed to be attached to.
- poolID, err := d.cluster.StoragePoolGetID(poolName)
+ poolID, err := d.cluster.GetStoragePoolID(poolName)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index 875ae408f7..b97b7949dc 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -100,7 +100,7 @@ func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) response.Res
}
// Retrieve ID of the storage pool (and check if the storage pool exists).
- poolID, err := d.cluster.StoragePoolGetID(poolName)
+ poolID, err := d.cluster.GetStoragePoolID(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -191,7 +191,7 @@ func storagePoolVolumeSnapshotsTypeGet(d *Daemon, r *http.Request) response.Resp
}
// Retrieve ID of the storage pool (and check if the storage pool exists).
- poolID, err := d.cluster.StoragePoolGetID(poolName)
+ poolID, err := d.cluster.GetStoragePoolID(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -723,7 +723,7 @@ func autoCreateCustomVolumeSnapshots(ctx context.Context, d *Daemon, volumes []d
}
// Get pool ID
- poolID, err := d.cluster.StoragePoolGetID(v.PoolName)
+ poolID, err := d.cluster.GetStoragePoolID(v.PoolName)
if err != nil {
logger.Error("Error retrieving pool ID", log.Ctx{"err": err, "pool": v.PoolName})
ch <- nil
@@ -789,7 +789,7 @@ func volumeDetermineNextSnapshotName(d *Daemon, volume db.StorageVolumeArgs, def
}
for _, pool := range pools {
- poolID, err := d.cluster.StoragePoolGetID(pool)
+ poolID, err := d.cluster.GetStoragePoolID(pool)
if err != nil {
return "", err
}
From c2ad4c004d2a86f641522d67fcf75e77f5db327b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:28:32 +0100
Subject: [PATCH 25/43] lxd/db: Rename StoragePoolGet to GetStoragePool
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_cluster.go | 8 +++----
lxd/backup/backup_instance_config.go | 2 +-
lxd/daemon_storage.go | 2 +-
lxd/db/migration_test.go | 2 +-
lxd/db/storage_pools.go | 8 +++----
lxd/device/disk.go | 4 ++--
lxd/instance/drivers/driver_lxc.go | 2 +-
lxd/instance/drivers/driver_qemu.go | 2 +-
lxd/instance_post.go | 2 +-
lxd/instances_post.go | 4 ++--
lxd/patches.go | 34 ++++++++++++++--------------
lxd/storage/load.go | 2 +-
lxd/storage/utils.go | 2 +-
lxd/storage_pools.go | 10 ++++----
lxd/storage_volumes.go | 2 +-
lxd/storage_volumes_snapshot.go | 8 +++----
16 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index b179a0a0b4..dd0095b9e8 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -419,7 +419,7 @@ func clusterPutJoin(d *Daemon, req api.ClusterPut) response.Response {
}
for _, name := range poolNames {
- _, pool, err := d.cluster.StoragePoolGet(name)
+ _, pool, err := d.cluster.GetStoragePool(name)
if err != nil {
return err
}
@@ -505,7 +505,7 @@ func clusterPutJoin(d *Daemon, req api.ClusterPut) response.Response {
}
for _, name := range poolNames {
- id, pool, err := d.cluster.StoragePoolGet(name)
+ id, pool, err := d.cluster.GetStoragePool(name)
if err != nil {
return err
}
@@ -1432,7 +1432,7 @@ func clusterCheckStoragePoolsMatch(cluster *db.Cluster, reqPools []api.StoragePo
continue
}
found = true
- _, pool, err := cluster.StoragePoolGet(name)
+ _, pool, err := cluster.GetStoragePool(name)
if err != nil {
return err
}
@@ -1448,7 +1448,7 @@ func clusterCheckStoragePoolsMatch(cluster *db.Cluster, reqPools []api.StoragePo
break
}
if !found {
- _, pool, err := cluster.StoragePoolGet(name)
+ _, pool, err := cluster.GetStoragePool(name)
if err != nil {
return err
}
diff --git a/lxd/backup/backup_instance_config.go b/lxd/backup/backup_instance_config.go
index 383eff4f30..5120087bb5 100644
--- a/lxd/backup/backup_instance_config.go
+++ b/lxd/backup/backup_instance_config.go
@@ -54,7 +54,7 @@ func updateRootDevicePool(devices map[string]map[string]string, poolName string)
// specified in b.Pool.
func UpdateInstanceConfigStoragePool(c *db.Cluster, b Info, mountPath string) error {
// Load the storage pool.
- _, pool, err := c.StoragePoolGet(b.Pool)
+ _, pool, err := c.GetStoragePool(b.Pool)
if err != nil {
return err
}
diff --git a/lxd/daemon_storage.go b/lxd/daemon_storage.go
index 74a9671812..33b82050ee 100644
--- a/lxd/daemon_storage.go
+++ b/lxd/daemon_storage.go
@@ -93,7 +93,7 @@ func daemonStorageValidate(s *state.State, target string) error {
volumeName := fields[1]
// Validate pool exists.
- poolID, dbPool, err := s.Cluster.StoragePoolGet(poolName)
+ poolID, dbPool, err := s.Cluster.GetStoragePool(poolName)
if err != nil {
return errors.Wrapf(err, "Unable to load storage pool %q", poolName)
}
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 4637027246..0f77c8e7c2 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -95,7 +95,7 @@ func TestImportPreClusteringData(t *testing.T) {
pools, err := cluster.GetStoragePoolNames()
require.NoError(t, err)
assert.Equal(t, []string{"default"}, pools)
- id, pool, err := cluster.StoragePoolGet("default")
+ id, pool, err := cluster.GetStoragePool("default")
require.NoError(t, err)
assert.Equal(t, int64(1), id)
assert.Equal(t, "/foo/bar", pool.Config["source"])
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index a5d1596dc2..34958e195a 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -474,8 +474,8 @@ func (c *Cluster) GetStoragePoolID(poolName string) (int64, error) {
return poolID, nil
}
-// StoragePoolGet returns a single storage pool.
-func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, error) {
+// GetStoragePool returns a single storage pool.
+func (c *Cluster) GetStoragePool(poolName string) (int64, *api.StoragePool, error) {
var poolDriver string
poolID := int64(-1)
description := sql.NullString{}
@@ -649,7 +649,7 @@ func storagePoolDriverGet(tx *sql.Tx, id int64) (string, error) {
// StoragePoolUpdate updates a storage pool.
func (c *Cluster) StoragePoolUpdate(poolName, description string, poolConfig map[string]string) error {
- poolID, _, err := c.StoragePoolGet(poolName)
+ poolID, _, err := c.GetStoragePool(poolName)
if err != nil {
return err
}
@@ -693,7 +693,7 @@ func StoragePoolConfigClear(tx *sql.Tx, poolID, nodeID int64) error {
// StoragePoolDelete deletes storage pool.
func (c *Cluster) StoragePoolDelete(poolName string) (*api.StoragePool, error) {
- poolID, pool, err := c.StoragePoolGet(poolName)
+ poolID, pool, err := c.GetStoragePool(poolName)
if err != nil {
return nil, err
}
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index 133a215294..6209bd9ea6 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -303,7 +303,7 @@ func (d *disk) startContainer() (*deviceConfig.RunConfig, error) {
// If ownerShift is none and pool is specified then check whether the pool itself
// has owner shifting enabled, and if so enable shifting on this device too.
if ownerShift == deviceConfig.MountOwnerShiftNone && d.config["pool"] != "" {
- poolID, _, err := d.state.Cluster.StoragePoolGet(d.config["pool"])
+ poolID, _, err := d.state.Cluster.GetStoragePool(d.config["pool"])
if err != nil {
return nil, err
}
@@ -908,7 +908,7 @@ func (d *disk) createDevice() (string, error) {
func (d *disk) storagePoolVolumeAttachShift(projectName, poolName, volumeName string, volumeType int, remapPath string) error {
// Load the DB records.
- poolID, pool, err := d.state.Cluster.StoragePoolGet(poolName)
+ poolID, pool, err := d.state.Cluster.GetStoragePool(poolName)
if err != nil {
return err
}
diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index 1cffc34840..5b6557ef2b 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -219,7 +219,7 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
storagePool := rootDiskDevice["pool"]
// Get the storage pool ID for the container
- poolID, dbPool, err := s.Cluster.StoragePoolGet(storagePool)
+ poolID, dbPool, err := s.Cluster.GetStoragePool(storagePool)
if err != nil {
c.Delete()
return nil, err
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 130a5f4f87..d45645aaea 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -232,7 +232,7 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
storagePool := rootDiskDevice["pool"]
// Get the storage pool ID for the instance.
- poolID, pool, err := s.Cluster.StoragePoolGet(storagePool)
+ poolID, pool, err := s.Cluster.GetStoragePool(storagePool)
if err != nil {
return nil, err
}
diff --git a/lxd/instance_post.go b/lxd/instance_post.go
index e868974e31..9d20cd8cf1 100644
--- a/lxd/instance_post.go
+++ b/lxd/instance_post.go
@@ -184,7 +184,7 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
err = errors.Wrap(err, "Failed to fetch instance's pool name")
return response.SmartError(err)
}
- _, pool, err := d.cluster.StoragePoolGet(poolName)
+ _, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
err = errors.Wrap(err, "Failed to fetch instance's pool info")
return response.SmartError(err)
diff --git a/lxd/instances_post.go b/lxd/instances_post.go
index 89a34687f5..01878626fe 100644
--- a/lxd/instances_post.go
+++ b/lxd/instances_post.go
@@ -411,7 +411,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) response.
return clusterCopyContainerInternal(d, source, project, req)
}
- _, pool, err := d.cluster.StoragePoolGet(sourcePoolName)
+ _, pool, err := d.cluster.GetStoragePool(sourcePoolName)
if err != nil {
err = errors.Wrap(err, "Failed to fetch instance's pool info")
return response.SmartError(err)
@@ -611,7 +611,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
})
// Check storage pool exists.
- _, _, err = d.State().Cluster.StoragePoolGet(bInfo.Pool)
+ _, _, err = d.State().Cluster.GetStoragePool(bInfo.Pool)
if errors.Cause(err) == db.ErrNoSuchObject {
// The storage pool doesn't exist. If backup is in binary format (so we cannot alter
// the backup.yaml) or the pool has been specified directly from the user restoring
diff --git a/lxd/patches.go b/lxd/patches.go
index 4a306548ed..a38831644f 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -225,7 +225,7 @@ func patchRenameCustomVolumeLVs(name string, d *Daemon) error {
pools, _ := d.cluster.GetStoragePoolNames()
for _, poolName := range pools {
- poolID, pool, err := d.cluster.StoragePoolGet(poolName)
+ poolID, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return err
}
@@ -494,7 +494,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
// Get the pool ID as we need it for storage volume creation.
// (Use a tmp variable as Go's scoping is freaking me out.)
- tmp, pool, err := d.cluster.StoragePoolGet(defaultPoolName)
+ tmp, pool, err := d.cluster.GetStoragePool(defaultPoolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -547,7 +547,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
}
// Get storage pool from the db after having updated it above.
- _, defaultPool, err := d.cluster.StoragePoolGet(defaultPoolName)
+ _, defaultPool, err := d.cluster.GetStoragePool(defaultPoolName)
if err != nil {
return err
}
@@ -794,7 +794,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
// Get the pool ID as we need it for storage volume creation.
// (Use a tmp variable as Go's scoping is freaking me out.)
- tmp, pool, err := d.cluster.StoragePoolGet(defaultPoolName)
+ tmp, pool, err := d.cluster.GetStoragePool(defaultPoolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -836,7 +836,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
}
// Get storage pool from the db after having updated it above.
- _, defaultPool, err := d.cluster.StoragePoolGet(defaultPoolName)
+ _, defaultPool, err := d.cluster.GetStoragePool(defaultPoolName)
if err != nil {
return err
}
@@ -1096,7 +1096,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
// Get the pool ID as we need it for storage volume creation.
// (Use a tmp variable as Go's scoping is freaking me out.)
- tmp, pool, err := d.cluster.StoragePoolGet(defaultPoolName)
+ tmp, pool, err := d.cluster.GetStoragePool(defaultPoolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -1145,7 +1145,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
}
// Get storage pool from the db after having updated it above.
- _, defaultPool, err := d.cluster.StoragePoolGet(defaultPoolName)
+ _, defaultPool, err := d.cluster.GetStoragePool(defaultPoolName)
if err != nil {
return err
}
@@ -1614,7 +1614,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
// Get the pool ID as we need it for storage volume creation.
// (Use a tmp variable as Go's scoping is freaking me out.)
- tmp, pool, err := d.cluster.StoragePoolGet(poolName)
+ tmp, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -1671,7 +1671,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
}
// Get storage pool from the db after having updated it above.
- _, defaultPool, err := d.cluster.StoragePoolGet(poolName)
+ _, defaultPool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return err
}
@@ -2203,7 +2203,7 @@ func patchStorageApiKeys(name string, d *Daemon) error {
}
for _, poolName := range pools {
- _, pool, err := d.cluster.StoragePoolGet(poolName)
+ _, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -2258,7 +2258,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
}
for _, poolName := range pools {
- poolID, pool, err := d.cluster.StoragePoolGet(poolName)
+ poolID, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -2406,7 +2406,7 @@ func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
}
for _, poolName := range pools {
- _, pool, err := d.cluster.StoragePoolGet(poolName)
+ _, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -2463,7 +2463,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
}
for _, poolName := range pools {
- poolID, pool, err := d.cluster.StoragePoolGet(poolName)
+ poolID, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -2558,7 +2558,7 @@ func patchStorageZFSnoauto(name string, d *Daemon) error {
}
for _, poolName := range pools {
- _, pool, err := d.cluster.StoragePoolGet(poolName)
+ _, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -2623,7 +2623,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
}
for _, poolName := range pools {
- poolID, pool, err := d.cluster.StoragePoolGet(poolName)
+ poolID, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -2709,7 +2709,7 @@ func patchStorageApiDirBindMount(name string, d *Daemon) error {
}
for _, poolName := range pools {
- _, pool, err := d.cluster.StoragePoolGet(poolName)
+ _, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
@@ -2794,7 +2794,7 @@ func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
}
for _, poolName := range pools {
- _, pool, err := d.cluster.StoragePoolGet(poolName)
+ _, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
logger.Errorf("Failed to query database: %s", err)
return err
diff --git a/lxd/storage/load.go b/lxd/storage/load.go
index eb421dcb79..fbe149fc7c 100644
--- a/lxd/storage/load.go
+++ b/lxd/storage/load.go
@@ -140,7 +140,7 @@ func GetPoolByName(state *state.State, name string) (Pool, error) {
}
// Load the database record.
- poolID, dbPool, err := state.Cluster.StoragePoolGet(name)
+ poolID, dbPool, err := state.Cluster.GetStoragePool(name)
if err != nil {
return nil, err
}
diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index 62f6016b81..7a044c5f9e 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -128,7 +128,7 @@ func VolumeDBCreate(s *state.State, project, poolName, volumeName, volumeDescrip
}
// Load storage pool the volume will be attached to.
- poolID, poolStruct, err := s.Cluster.StoragePoolGet(poolName)
+ poolID, poolStruct, err := s.Cluster.GetStoragePool(poolName)
if err != nil {
return err
}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 17752986ee..25ea9a1cb1 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -55,7 +55,7 @@ func storagePoolsGet(d *Daemon, r *http.Request) response.Response {
if !recursion {
resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, pool))
} else {
- plID, pl, err := d.cluster.StoragePoolGet(pool)
+ plID, pl, err := d.cluster.GetStoragePool(pool)
if err != nil {
continue
}
@@ -295,7 +295,7 @@ func storagePoolGet(d *Daemon, r *http.Request) response.Response {
poolName := mux.Vars(r)["name"]
// Get the existing storage pool.
- poolID, pool, err := d.cluster.StoragePoolGet(poolName)
+ poolID, pool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -333,7 +333,7 @@ func storagePoolPut(d *Daemon, r *http.Request) response.Response {
poolName := mux.Vars(r)["name"]
// Get the existing storage pool.
- _, dbInfo, err := d.cluster.StoragePoolGet(poolName)
+ _, dbInfo, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -408,7 +408,7 @@ func storagePoolPatch(d *Daemon, r *http.Request) response.Response {
poolName := mux.Vars(r)["name"]
// Get the existing network
- _, dbInfo, err := d.cluster.StoragePoolGet(poolName)
+ _, dbInfo, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -547,7 +547,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) response.Response {
// Check if the pool is pending, if so we just need to delete it from
// the database.
- _, dbPool, err := d.cluster.StoragePoolGet(poolName)
+ _, dbPool, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 86146d8e6d..f77f46197c 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -1108,7 +1108,7 @@ func storagePoolVolumeTypeDelete(d *Daemon, r *http.Request, volumeTypeName stri
return resp
}
- poolID, _, err := d.cluster.StoragePoolGet(poolName)
+ poolID, _, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index b97b7949dc..c63b5eef5e 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -292,7 +292,7 @@ func storagePoolVolumeSnapshotTypePost(d *Daemon, r *http.Request) response.Resp
return resp
}
- poolID, _, err := d.cluster.StoragePoolGet(poolName)
+ poolID, _, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -358,7 +358,7 @@ func storagePoolVolumeSnapshotTypeGet(d *Daemon, r *http.Request) response.Respo
return resp
}
- poolID, _, err := d.cluster.StoragePoolGet(poolName)
+ poolID, _, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -426,7 +426,7 @@ func storagePoolVolumeSnapshotTypePut(d *Daemon, r *http.Request) response.Respo
return resp
}
- poolID, _, err := d.cluster.StoragePoolGet(poolName)
+ poolID, _, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
@@ -522,7 +522,7 @@ func storagePoolVolumeSnapshotTypeDelete(d *Daemon, r *http.Request) response.Re
return resp
}
- poolID, _, err := d.cluster.StoragePoolGet(poolName)
+ poolID, _, err := d.cluster.GetStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
From c6bf4746c3251349078fef89e5dc17156928beb8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:29:24 +0100
Subject: [PATCH 26/43] lxd/db: Rename StoragePoolConfigGet to
getStoragePoolConfig
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 34958e195a..1c593339a8 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -493,7 +493,7 @@ func (c *Cluster) GetStoragePool(poolName string) (int64, *api.StoragePool, erro
return -1, nil, err
}
- config, err := c.StoragePoolConfigGet(poolID)
+ config, err := c.getStoragePoolConfig(poolID)
if err != nil {
return -1, nil, err
}
@@ -542,8 +542,8 @@ SELECT nodes.name FROM nodes
return nodes, nil
}
-// StoragePoolConfigGet returns the config of a storage pool.
-func (c *Cluster) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
+// Return the config of a storage pool.
+func (c *Cluster) getStoragePoolConfig(poolID int64) (map[string]string, error) {
var key, value string
query := "SELECT key, value FROM storage_pools_config WHERE storage_pool_id=? AND (node_id=? OR node_id IS NULL)"
inargs := []interface{}{poolID, c.nodeID}
From e859a92281d6cb9ebb0d67463042892ee580392d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:30:32 +0100
Subject: [PATCH 27/43] lxd/db: Rename StoragePoolCreate to CreateStoragePool
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/containers_test.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_pools_test.go | 8 ++++----
lxd/storage_pools_utils.go | 2 +-
4 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/lxd/db/containers_test.go b/lxd/db/containers_test.go
index 48068ec769..bf4c087b89 100644
--- a/lxd/db/containers_test.go
+++ b/lxd/db/containers_test.go
@@ -353,7 +353,7 @@ func TestGetInstancePool(t *testing.T) {
cluster, cleanup := db.NewTestCluster(t)
defer cleanup()
- poolID, err := cluster.StoragePoolCreate("default", "", "dir", nil)
+ poolID, err := cluster.CreateStoragePool("default", "", "dir", nil)
require.NoError(t, err)
_, err = cluster.StoragePoolVolumeCreate("default", "c1", "", db.StoragePoolVolumeTypeContainer, poolID, nil)
require.NoError(t, err)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 1c593339a8..f7d8c389f6 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -566,8 +566,8 @@ func (c *Cluster) getStoragePoolConfig(poolID int64) (map[string]string, error)
return config, nil
}
-// StoragePoolCreate creates new storage pool.
-func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
+// CreateStoragePool creates a new storage pool.
+func (c *Cluster) CreateStoragePool(poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
var id int64
err := c.Transaction(func(tx *ClusterTx) error {
result, err := tx.tx.Exec("INSERT INTO storage_pools (name, description, driver, state) VALUES (?, ?, ?, ?)", poolName, poolDescription, poolDriver, storagePoolCreated)
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
index 4a1abc3283..560578c7b4 100644
--- a/lxd/db/storage_pools_test.go
+++ b/lxd/db/storage_pools_test.go
@@ -18,7 +18,7 @@ func TestGetStoragePoolsLocalConfigs(t *testing.T) {
// Create a storage pool named "local" (like the default LXD clustering
// one), then delete it and create another one.
- _, err := cluster.StoragePoolCreate("local", "", "dir", map[string]string{
+ _, err := cluster.CreateStoragePool("local", "", "dir", map[string]string{
"rsync.bwlimit": "1",
"source": "/foo/bar",
})
@@ -27,7 +27,7 @@ func TestGetStoragePoolsLocalConfigs(t *testing.T) {
_, err = cluster.StoragePoolDelete("local")
require.NoError(t, err)
- _, err = cluster.StoragePoolCreate("BTRFS", "", "dir", map[string]string{
+ _, err = cluster.CreateStoragePool("BTRFS", "", "dir", map[string]string{
"rsync.bwlimit": "1",
"source": "/egg/baz",
})
@@ -164,7 +164,7 @@ func TestStoragePoolVolume_Ceph(t *testing.T) {
})
require.NoError(t, err)
- poolID, err := cluster.StoragePoolCreate("p1", "", "ceph", nil)
+ poolID, err := cluster.CreateStoragePool("p1", "", "ceph", nil)
require.NoError(t, err)
config := map[string]string{"k": "v"}
@@ -217,7 +217,7 @@ func TestStoragePoolVolumeCreate_Snapshot(t *testing.T) {
cluster, cleanup := db.NewTestCluster(t)
defer cleanup()
- poolID, err := cluster.StoragePoolCreate("p1", "", "dir", nil)
+ poolID, err := cluster.CreateStoragePool("p1", "", "dir", nil)
require.NoError(t, err)
config := map[string]string{"k": "v"}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 8076c2712b..3e2e92a335 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -242,7 +242,7 @@ func storagePoolCreateLocal(state *state.State, id int64, req api.StoragePoolsPo
// Helper around the low-level DB API, which also updates the driver names cache.
func dbStoragePoolCreateAndUpdateCache(s *state.State, poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
- id, err := s.Cluster.StoragePoolCreate(poolName, poolDescription, poolDriver, poolConfig)
+ id, err := s.Cluster.CreateStoragePool(poolName, poolDescription, poolDriver, poolConfig)
if err != nil {
return id, err
}
From fa4c222d5f466f8683c7ec38983f52d27e029e07 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:31:50 +0100
Subject: [PATCH 28/43] lxd/db: Rename StoragePoolUpdate to UpdateStoragePool
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 10 +++++-----
lxd/patches.go | 16 ++++++++--------
lxd/storage/backend_lxd.go | 2 +-
lxd/storage_pools_utils.go | 2 +-
4 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index f7d8c389f6..22f71106e5 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -647,15 +647,15 @@ func storagePoolDriverGet(tx *sql.Tx, id int64) (string, error) {
}
}
-// StoragePoolUpdate updates a storage pool.
-func (c *Cluster) StoragePoolUpdate(poolName, description string, poolConfig map[string]string) error {
+// UpdateStoragePool updates a storage pool.
+func (c *Cluster) UpdateStoragePool(poolName, description string, poolConfig map[string]string) error {
poolID, _, err := c.GetStoragePool(poolName)
if err != nil {
return err
}
err = c.Transaction(func(tx *ClusterTx) error {
- err = StoragePoolUpdateDescription(tx.tx, poolID, description)
+ err = updateStoragePoolDescription(tx.tx, poolID, description)
if err != nil {
return err
}
@@ -675,8 +675,8 @@ func (c *Cluster) StoragePoolUpdate(poolName, description string, poolConfig map
return err
}
-// StoragePoolUpdateDescription updates the storage pool description.
-func StoragePoolUpdateDescription(tx *sql.Tx, id int64, description string) error {
+// Update the storage pool description.
+func updateStoragePoolDescription(tx *sql.Tx, id int64, description string) error {
_, err := tx.Exec("UPDATE storage_pools SET description=? WHERE id=?", description, id)
return err
}
diff --git a/lxd/patches.go b/lxd/patches.go
index a38831644f..fe934f3be1 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -507,7 +507,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
if pool.Config == nil {
pool.Config = poolConfig
}
- err = d.cluster.StoragePoolUpdate(defaultPoolName, "", pool.Config)
+ err = d.cluster.UpdateStoragePool(defaultPoolName, "", pool.Config)
if err != nil {
return err
}
@@ -807,7 +807,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
if pool.Config == nil {
pool.Config = poolConfig
}
- err = d.cluster.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
+ err = d.cluster.UpdateStoragePool(defaultPoolName, pool.Description, pool.Config)
if err != nil {
return err
}
@@ -1109,7 +1109,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
if pool.Config == nil {
pool.Config = poolConfig
}
- err = d.cluster.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
+ err = d.cluster.UpdateStoragePool(defaultPoolName, pool.Description, pool.Config)
if err != nil {
return err
}
@@ -1627,7 +1627,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
if pool.Config == nil {
pool.Config = poolConfig
}
- err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+ err = d.cluster.UpdateStoragePool(poolName, pool.Description, pool.Config)
if err != nil {
return err
}
@@ -2236,7 +2236,7 @@ func patchStorageApiKeys(name string, d *Daemon) error {
}
// Update the config in the database.
- err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+ err = d.cluster.UpdateStoragePool(poolName, pool.Description, pool.Config)
if err != nil {
return err
}
@@ -2324,7 +2324,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
}
// Update the storage pool config.
- err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+ err = d.cluster.UpdateStoragePool(poolName, pool.Description, pool.Config)
if err != nil {
return err
}
@@ -2441,7 +2441,7 @@ func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
pool.Config["source"] = driver.GetStoragePoolMountPoint(poolName)
// Update the storage pool config.
- err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+ err = d.cluster.UpdateStoragePool(poolName, pool.Description, pool.Config)
if err != nil {
return err
}
@@ -2811,7 +2811,7 @@ func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
}
// Update the config in the database.
- err = d.cluster.StoragePoolUpdate(poolName, pool.Description,
+ err = d.cluster.UpdateStoragePool(poolName, pool.Description,
pool.Config)
if err != nil {
return err
diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index d73e24d334..d0d021ec04 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -193,7 +193,7 @@ func (b *lxdBackend) Update(driverOnly bool, newDesc string, newConfig map[strin
// Update the database if something changed.
if len(changedConfig) != 0 || newDesc != b.db.Description {
- err = b.state.Cluster.StoragePoolUpdate(b.name, newDesc, newConfig)
+ err = b.state.Cluster.UpdateStoragePool(b.name, newDesc, newConfig)
if err != nil {
return err
}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 3e2e92a335..7cd82b626f 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -228,7 +228,7 @@ func storagePoolCreateLocal(state *state.State, id int64, req api.StoragePoolsPo
configDiff, _ := storagePools.ConfigDiff(req.Config, updatedConfig)
if len(configDiff) > 0 {
// Create the database entry for the storage pool.
- err = state.Cluster.StoragePoolUpdate(req.Name, req.Description, updatedConfig)
+ err = state.Cluster.UpdateStoragePool(req.Name, req.Description, updatedConfig)
if err != nil {
return nil, fmt.Errorf("Error inserting %s into database: %s", req.Name, err)
}
From 2f71846bfbad46e8b6def98d770ed35c70017217 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:32:51 +0100
Subject: [PATCH 29/43] lxd/db: Rename StoragePoolConfigClear to
clearStoragePoolConfig
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 22f71106e5..dba66905b9 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -660,7 +660,7 @@ func (c *Cluster) UpdateStoragePool(poolName, description string, poolConfig map
return err
}
- err = StoragePoolConfigClear(tx.tx, poolID, c.nodeID)
+ err = clearStoragePoolConfig(tx.tx, poolID, c.nodeID)
if err != nil {
return err
}
@@ -681,8 +681,8 @@ func updateStoragePoolDescription(tx *sql.Tx, id int64, description string) erro
return err
}
-// StoragePoolConfigClear deletes the storage pool config.
-func StoragePoolConfigClear(tx *sql.Tx, poolID, nodeID int64) error {
+// Delete the storage pool config.
+func clearStoragePoolConfig(tx *sql.Tx, poolID, nodeID int64) error {
_, err := tx.Exec("DELETE FROM storage_pools_config WHERE storage_pool_id=? AND (node_id=? OR node_id IS NULL)", poolID, nodeID)
if err != nil {
return err
From 1ccbe158e07201117c6ffd06ad94b6a9c2fe02fe Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:33:39 +0100
Subject: [PATCH 30/43] lxd/db: Rename StoragePoolDelete to RemoveStoragePool
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_pools_test.go | 2 +-
lxd/storage_pools.go | 2 +-
lxd/storage_pools_utils.go | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index dba66905b9..f972673ed2 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -691,8 +691,8 @@ func clearStoragePoolConfig(tx *sql.Tx, poolID, nodeID int64) error {
return nil
}
-// StoragePoolDelete deletes storage pool.
-func (c *Cluster) StoragePoolDelete(poolName string) (*api.StoragePool, error) {
+// RemoveStoragePool deletes the storage pool.
+func (c *Cluster) RemoveStoragePool(poolName string) (*api.StoragePool, error) {
poolID, pool, err := c.GetStoragePool(poolName)
if err != nil {
return nil, err
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
index 560578c7b4..940ac7f3d6 100644
--- a/lxd/db/storage_pools_test.go
+++ b/lxd/db/storage_pools_test.go
@@ -24,7 +24,7 @@ func TestGetStoragePoolsLocalConfigs(t *testing.T) {
})
require.NoError(t, err)
- _, err = cluster.StoragePoolDelete("local")
+ _, err = cluster.RemoveStoragePool("local")
require.NoError(t, err)
_, err = cluster.CreateStoragePool("BTRFS", "", "dir", map[string]string{
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 25ea9a1cb1..eab0f2ded7 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -553,7 +553,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) response.Response {
}
if dbPool.Status == "Pending" {
- _, err := d.cluster.StoragePoolDelete(poolName)
+ _, err := d.cluster.RemoveStoragePool(poolName)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 7cd82b626f..2e34df8b87 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -256,7 +256,7 @@ func dbStoragePoolCreateAndUpdateCache(s *state.State, poolName string, poolDesc
// Helper around the low-level DB API, which also updates the driver names
// cache.
func dbStoragePoolDeleteAndUpdateCache(s *state.State, poolName string) error {
- _, err := s.Cluster.StoragePoolDelete(poolName)
+ _, err := s.Cluster.RemoveStoragePool(poolName)
if err != nil {
return err
}
From bd27eba824a70c20e31580acee8c8bc3449e0410 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:35:06 +0100
Subject: [PATCH 31/43] lxd/db: Rename StoragePoolVolumesGetNames to
GetStoragePoolVolumesNames
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 4 ++--
lxd/storage_pools.go | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index f972673ed2..29b286d09a 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -706,9 +706,9 @@ func (c *Cluster) RemoveStoragePool(poolName string) (*api.StoragePool, error) {
return pool, nil
}
-// StoragePoolVolumesGetNames gets the names of all storage volumes attached to
+// GetStoragePoolVolumesNames gets the names of all storage volumes attached to
// a given storage pool.
-func (c *Cluster) StoragePoolVolumesGetNames(poolID int64) ([]string, error) {
+func (c *Cluster) GetStoragePoolVolumesNames(poolID int64) ([]string, error) {
var volumeName string
query := "SELECT name FROM storage_volumes_all WHERE storage_pool_id=? AND node_id=?"
inargs := []interface{}{poolID, c.nodeID}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index eab0f2ded7..4059b8d9bb 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -560,7 +560,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) response.Response {
return response.EmptySyncResponse
}
- volumeNames, err := d.cluster.StoragePoolVolumesGetNames(poolID)
+ volumeNames, err := d.cluster.GetStoragePoolVolumesNames(poolID)
if err != nil {
return response.InternalError(err)
}
@@ -627,7 +627,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) response.Response {
}
func storagePoolDeleteCheckPreconditions(cluster *db.Cluster, poolName string, poolID int64) response.Response {
- volumeNames, err := cluster.StoragePoolVolumesGetNames(poolID)
+ volumeNames, err := cluster.GetStoragePoolVolumesNames(poolID)
if err != nil {
return response.InternalError(err)
}
From 3f9a32a5728997f28bbaf27ac5f4ba2a5dc8f661 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:36:30 +0100
Subject: [PATCH 32/43] lxd/db: Rename StoragePoolVolumesGetAllByType to
GetStoragePoolVolumesWithType
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 4 ++--
lxd/main_activateifneeded.go | 2 +-
lxd/storage_volumes_snapshot.go | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 29b286d09a..3d5ba9514c 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -728,8 +728,8 @@ func (c *Cluster) GetStoragePoolVolumesNames(poolID int64) ([]string, error) {
return out, nil
}
-// StoragePoolVolumesGetAllByType return a list of volumes by type.
-func (c *Cluster) StoragePoolVolumesGetAllByType(volumeType int) ([]StorageVolumeArgs, error) {
+// GetStoragePoolVolumesWithType returns a list of all volumes of the given type.
+func (c *Cluster) GetStoragePoolVolumesWithType(volumeType int) ([]StorageVolumeArgs, error) {
var id int64
var name string
var description string
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index 94a7a0eb93..9197ff9b5c 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -143,7 +143,7 @@ func (c *cmdActivateifneeded) Run(cmd *cobra.Command, args []string) error {
}
// Check for scheduled volume snapshots
- volumes, err := d.cluster.StoragePoolVolumesGetAllByType(db.StoragePoolVolumeTypeCustom)
+ volumes, err := d.cluster.GetStoragePoolVolumesWithType(db.StoragePoolVolumeTypeCustom)
if err != nil {
return err
}
diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index c63b5eef5e..0a33395a28 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -619,7 +619,7 @@ func pruneExpiredCustomVolumeSnapshots(ctx context.Context, d *Daemon, expiredSn
func autoCreateCustomVolumeSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
f := func(ctx context.Context) {
- allVolumes, err := d.cluster.StoragePoolVolumesGetAllByType(db.StoragePoolVolumeTypeCustom)
+ allVolumes, err := d.cluster.GetStoragePoolVolumesWithType(db.StoragePoolVolumeTypeCustom)
if err != nil {
return
}
From 9523b00dc41c68842ec7b95ffa42a9009bb1b142 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:37:29 +0100
Subject: [PATCH 33/43] lxd/db: Rename StoragePoolVolumesGet to
GetStoragePoolVolumes
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 4 ++--
lxd/storage_pools.go | 2 +-
lxd/storage_volumes.go | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 3d5ba9514c..d0a9729f12 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -774,9 +774,9 @@ WHERE storage_volumes.type = ?
return response, nil
}
-// StoragePoolVolumesGet returns all storage volumes attached to a given
+// GetStoragePoolVolumes returns all storage volumes attached to a given
// storage pool on any node.
-func (c *Cluster) StoragePoolVolumesGet(project string, poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
+func (c *Cluster) GetStoragePoolVolumes(project string, poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
var nodeIDs []int
err := c.Transaction(func(tx *ClusterTx) error {
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 4059b8d9bb..674288011e 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -644,7 +644,7 @@ func storagePoolDeleteCheckPreconditions(cluster *db.Cluster, poolName string, p
if len(volumeNames) > 0 {
for _, project := range projects {
- volumes, err := cluster.StoragePoolVolumesGet(project, poolID, supportedVolumeTypes)
+ volumes, err := cluster.GetStoragePoolVolumes(project, poolID, supportedVolumeTypes)
if err != nil {
return response.InternalError(err)
}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index f77f46197c..e6e8658d3a 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -96,7 +96,7 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) response.Response {
}
// Get all instance volumes currently attached to the storage pool by ID of the pool and project.
- volumes, err := d.cluster.StoragePoolVolumesGet(projectName, poolID, supportedVolumeTypesInstances)
+ volumes, err := d.cluster.GetStoragePoolVolumes(projectName, poolID, supportedVolumeTypesInstances)
if err != nil && err != db.ErrNoSuchObject {
return response.SmartError(err)
}
@@ -109,7 +109,7 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) response.Response {
}
// Get all custom volumes currently attached to the storage pool by ID of the pool and project.
- custVolumes, err := d.cluster.StoragePoolVolumesGet(customVolProjectName, poolID, []int{db.StoragePoolVolumeTypeCustom})
+ custVolumes, err := d.cluster.GetStoragePoolVolumes(customVolProjectName, poolID, []int{db.StoragePoolVolumeTypeCustom})
if err != nil && err != db.ErrNoSuchObject {
return response.SmartError(err)
}
@@ -122,7 +122,7 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) response.Response {
// table, but are effectively a cache which is not tied to projects, so we always link the to the default
// project. This means that we want to filter image volumes and return only the ones that have fingerprint
// matching images actually in use by the project.
- imageVolumes, err := d.cluster.StoragePoolVolumesGet(project.Default, poolID, []int{db.StoragePoolVolumeTypeImage})
+ imageVolumes, err := d.cluster.GetStoragePoolVolumes(project.Default, poolID, []int{db.StoragePoolVolumeTypeImage})
if err != nil && err != db.ErrNoSuchObject {
return response.SmartError(err)
}
From d07b91cacda09984f3fdebfd64fcece7671ee1e4 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:41:52 +0100
Subject: [PATCH 34/43] lxd/db: Rename StoragePoolNodeVolumesGet to
GetLocalStoragePoolVolumes
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/migration_test.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
lxd/patches.go | 6 +++---
lxd/storage/backend_lxd_patches.go | 2 +-
lxd/storage_pools_utils.go | 2 +-
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 0f77c8e7c2..943a6baaa2 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -105,7 +105,7 @@ func TestImportPreClusteringData(t *testing.T) {
assert.Equal(t, "true", pool.Config["zfs.clone_copy"])
assert.Equal(t, "Created", pool.Status)
assert.Equal(t, []string{"none"}, pool.Locations)
- volumes, err := cluster.StoragePoolNodeVolumesGet("default", id, []int{1})
+ volumes, err := cluster.GetLocalStoragePoolVolumes("default", id, []int{1})
require.NoError(t, err)
assert.Len(t, volumes, 1)
assert.Equal(t, "/foo/bar", volumes[0].Config["source"])
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index d0a9729f12..cac024d54c 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -804,9 +804,9 @@ SELECT DISTINCT node_id
return volumes, nil
}
-// StoragePoolNodeVolumesGet returns all storage volumes attached to a given
+// GetLocalStoragePoolVolumes returns all storage volumes attached to a given
// storage pool on the current node.
-func (c *Cluster) StoragePoolNodeVolumesGet(project string, poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
+func (c *Cluster) GetLocalStoragePoolVolumes(project string, poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
return c.storagePoolVolumesGet(project, poolID, c.nodeID, volumeTypes)
}
diff --git a/lxd/patches.go b/lxd/patches.go
index fe934f3be1..9369789443 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -2330,7 +2330,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
}
// Get all storage volumes on the storage pool.
- volumes, err := d.cluster.StoragePoolNodeVolumesGet("default", poolID, supportedVolumeTypes)
+ volumes, err := d.cluster.GetLocalStoragePoolVolumes("default", poolID, supportedVolumeTypes)
if err != nil {
if err == db.ErrNoSuchObject {
continue
@@ -2486,7 +2486,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
}
// Get all storage volumes on the storage pool.
- volumes, err := d.cluster.StoragePoolNodeVolumesGet("default", poolID, supportedVolumeTypes)
+ volumes, err := d.cluster.GetLocalStoragePoolVolumes("default", poolID, supportedVolumeTypes)
if err != nil {
if err == db.ErrNoSuchObject {
continue
@@ -2635,7 +2635,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
}
// Get all storage volumes on the storage pool.
- volumes, err := d.cluster.StoragePoolNodeVolumesGet("default", poolID, supportedVolumeTypes)
+ volumes, err := d.cluster.GetLocalStoragePoolVolumes("default", poolID, supportedVolumeTypes)
if err != nil {
if err == db.ErrNoSuchObject {
continue
diff --git a/lxd/storage/backend_lxd_patches.go b/lxd/storage/backend_lxd_patches.go
index 99b45d9a00..ef5e373292 100644
--- a/lxd/storage/backend_lxd_patches.go
+++ b/lxd/storage/backend_lxd_patches.go
@@ -34,7 +34,7 @@ func lxdPatchStorageCreateVM(b *lxdBackend) error {
func lxdPatchStorageRenameCustomVolumeAddProject(b *lxdBackend) error {
// Get all custom volumes in default project on this node.
// At this time, all custom volumes are in the default project.
- volumes, err := b.state.Cluster.StoragePoolNodeVolumesGet(project.Default, b.ID(), []int{db.StoragePoolVolumeTypeCustom})
+ volumes, err := b.state.Cluster.GetLocalStoragePoolVolumes(project.Default, b.ID(), []int{db.StoragePoolVolumeTypeCustom})
if err != nil && err != db.ErrNoSuchObject {
return errors.Wrapf(err, "Failed getting custom volumes for default project")
}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 2e34df8b87..2594ccb543 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -29,7 +29,7 @@ func storagePoolUpdate(state *state.State, name, newDescription string, newConfi
// /1.0/profiles/default
func storagePoolUsedByGet(state *state.State, project string, poolID int64, poolName string) ([]string, error) {
// Retrieve all non-custom volumes that exist on this storage pool.
- volumes, err := state.Cluster.StoragePoolNodeVolumesGet(project, poolID, []int{db.StoragePoolVolumeTypeContainer, db.StoragePoolVolumeTypeImage, db.StoragePoolVolumeTypeCustom, db.StoragePoolVolumeTypeVM})
+ volumes, err := state.Cluster.GetLocalStoragePoolVolumes(project, poolID, []int{db.StoragePoolVolumeTypeContainer, db.StoragePoolVolumeTypeImage, db.StoragePoolVolumeTypeCustom, db.StoragePoolVolumeTypeVM})
if err != nil && err != db.ErrNoSuchObject {
return []string{}, err
}
From 93d1f5fa51042361f953a2bc9a7b96a8867420cd Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:43:54 +0100
Subject: [PATCH 35/43] lxd/db: Rename StoragePoolVolumeSnapshotsGetType to
GetLocalStoragePoolVolumeSnapshotsWithType
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/daemon_storage.go | 2 +-
lxd/db/storage_pools.go | 6 +++---
lxd/storage/utils.go | 2 +-
lxd/storage_volumes_snapshot.go | 4 ++--
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/lxd/daemon_storage.go b/lxd/daemon_storage.go
index 33b82050ee..8b5e56f9a2 100644
--- a/lxd/daemon_storage.go
+++ b/lxd/daemon_storage.go
@@ -109,7 +109,7 @@ func daemonStorageValidate(s *state.State, target string) error {
return errors.Wrapf(err, "Unable to load storage volume %q", target)
}
- snapshots, err := s.Cluster.StoragePoolVolumeSnapshotsGetType(project.Default, volumeName, db.StoragePoolVolumeTypeCustom, poolID)
+ snapshots, err := s.Cluster.GetLocalStoragePoolVolumeSnapshotsWithType(project.Default, volumeName, db.StoragePoolVolumeTypeCustom, poolID)
if err != nil {
return errors.Wrapf(err, "Unable to load storage volume snapshots %q", target)
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index cac024d54c..5b38c9f29a 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -866,10 +866,10 @@ SELECT storage_volumes_all.name
return response, nil
}
-// StoragePoolVolumeSnapshotsGetType get all snapshots of a storage volume
-// attached to a given storage pool of a given volume type, on the given node.
+// GetLocalStoragePoolVolumeSnapshotsWithType gets all snapshots of a storage volume
+// attached to a given storage pool of a given volume type, on the local node.
// Returns snapshots slice ordered by when they were created, oldest first.
-func (c *Cluster) StoragePoolVolumeSnapshotsGetType(projectName string, volumeName string, volumeType int, poolID int64) ([]StorageVolumeArgs, error) {
+func (c *Cluster) GetLocalStoragePoolVolumeSnapshotsWithType(projectName string, volumeName string, volumeType int, poolID int64) ([]StorageVolumeArgs, error) {
result := []StorageVolumeArgs{}
// ORDER BY id is important here as the users of this function can expect that the results
diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index 7a044c5f9e..a69877042c 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -312,7 +312,7 @@ func VolumeSnapshotsGet(s *state.State, projectName string, pool string, volume
return nil, err
}
- snapshots, err := s.Cluster.StoragePoolVolumeSnapshotsGetType(projectName, volume, volType, poolID)
+ snapshots, err := s.Cluster.GetLocalStoragePoolVolumeSnapshotsWithType(projectName, volume, volType, poolID)
if err != nil {
return nil, err
}
diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index 0a33395a28..5055923120 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -197,7 +197,7 @@ func storagePoolVolumeSnapshotsTypeGet(d *Daemon, r *http.Request) response.Resp
}
// Get the names of all storage volume snapshots of a given volume.
- volumes, err := d.cluster.StoragePoolVolumeSnapshotsGetType(projectName, volumeName, volumeType, poolID)
+ volumes, err := d.cluster.GetLocalStoragePoolVolumeSnapshotsWithType(projectName, volumeName, volumeType, poolID)
if err != nil {
return response.SmartError(err)
}
@@ -795,7 +795,7 @@ func volumeDetermineNextSnapshotName(d *Daemon, volume db.StorageVolumeArgs, def
}
for _, project := range projects {
- snaps, err := d.cluster.StoragePoolVolumeSnapshotsGetType(project, volume.Name, db.StoragePoolVolumeTypeCustom, poolID)
+ snaps, err := d.cluster.GetLocalStoragePoolVolumeSnapshotsWithType(project, volume.Name, db.StoragePoolVolumeTypeCustom, poolID)
if err != nil {
return "", err
}
From 5f28f1ae81d3fbf3b6e08c2a8dbd1d0ad0b637db Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:45:09 +0100
Subject: [PATCH 36/43] lxd/db: Rename StoragePoolNodeVolumesGetType to
GetLocalStoragePoolVolumesWithType
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 4 ++--
lxd/patches.go | 4 ++--
lxd/storage_volumes.go | 2 +-
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 5b38c9f29a..82cc4ee826 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -906,9 +906,9 @@ SELECT storage_volumes_snapshots.name, storage_volumes_snapshots.description FRO
return result, nil
}
-// StoragePoolNodeVolumesGetType returns all storage volumes attached to a
+// GetLocalStoragePoolVolumesWithType returns all storage volumes attached to a
// given storage pool of a given volume type, on the current node.
-func (c *Cluster) StoragePoolNodeVolumesGetType(projectName string, volumeType int, poolID int64) ([]string, error) {
+func (c *Cluster) GetLocalStoragePoolVolumesWithType(projectName string, volumeType int, poolID int64) ([]string, error) {
return c.storagePoolVolumesGetType(projectName, volumeType, poolID, c.nodeID)
}
diff --git a/lxd/patches.go b/lxd/patches.go
index 9369789443..40daf6dfe4 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -234,7 +234,7 @@ func patchRenameCustomVolumeLVs(name string, d *Daemon) error {
continue
}
- volumes, err := d.cluster.StoragePoolNodeVolumesGetType(project.Default, db.StoragePoolVolumeTypeCustom, poolID)
+ volumes, err := d.cluster.GetLocalStoragePoolVolumesWithType(project.Default, db.StoragePoolVolumeTypeCustom, poolID)
if err != nil {
return err
}
@@ -3047,7 +3047,7 @@ func patchStorageApiPermissions(name string, d *Daemon) error {
return err
}
- volumes, err := d.cluster.StoragePoolNodeVolumesGetType(project.Default, db.StoragePoolVolumeTypeCustom, poolID)
+ volumes, err := d.cluster.GetLocalStoragePoolVolumesWithType(project.Default, db.StoragePoolVolumeTypeCustom, poolID)
if err != nil && err != db.ErrNoSuchObject {
return err
}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index e6e8658d3a..f518e6c962 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -213,7 +213,7 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) response.Response {
}
// Get the names of all storage volumes of a given volume type currently attached to the storage pool.
- volumes, err := d.cluster.StoragePoolNodeVolumesGetType(projectName, volumeType, poolID)
+ volumes, err := d.cluster.GetLocalStoragePoolVolumesWithType(projectName, volumeType, poolID)
if err != nil {
return response.SmartError(err)
}
From 0c2d82910e59d1d01b280ed551e96ed7b8f093ba Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:47:51 +0100
Subject: [PATCH 37/43] lxd/db: Rename StoragePoolNodeVolumeGetTypeByProject to
GetLocalStoragePoolVolume
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_internal.go | 4 ++--
lxd/daemon_storage.go | 2 +-
lxd/db/storage_pools.go | 10 +++++-----
lxd/db/storage_volume_snapshots.go | 2 +-
lxd/device/disk.go | 4 ++--
lxd/migrate_storage_volumes.go | 2 +-
lxd/storage/backend_lxd.go | 28 ++++++++++++++--------------
lxd/storage/load.go | 2 +-
lxd/storage_volumes.go | 12 ++++++------
lxd/storage_volumes_snapshot.go | 10 +++++-----
10 files changed, 38 insertions(+), 38 deletions(-)
diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 1d1261dc7b..ec4c64c71f 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -537,7 +537,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
}
// Check if a storage volume entry for the instance already exists.
- _, volume, ctVolErr := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, req.Name, instanceDBVolType, pool.ID())
+ _, volume, ctVolErr := d.cluster.GetLocalStoragePoolVolume(projectName, req.Name, instanceDBVolType, pool.ID())
if ctVolErr != nil {
if ctVolErr != db.ErrNoSuchObject {
return response.SmartError(ctVolErr)
@@ -677,7 +677,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
}
// Check if a storage volume entry for the snapshot already exists.
- _, _, csVolErr := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, snap.Name, instanceDBVolType, pool.ID())
+ _, _, csVolErr := d.cluster.GetLocalStoragePoolVolume(projectName, snap.Name, instanceDBVolType, pool.ID())
if csVolErr != nil {
if csVolErr != db.ErrNoSuchObject {
return response.SmartError(csVolErr)
diff --git a/lxd/daemon_storage.go b/lxd/daemon_storage.go
index 8b5e56f9a2..c9e1c78bc6 100644
--- a/lxd/daemon_storage.go
+++ b/lxd/daemon_storage.go
@@ -104,7 +104,7 @@ func daemonStorageValidate(s *state.State, target string) error {
}
// Confirm volume exists.
- _, _, err = s.Cluster.StoragePoolNodeVolumeGetTypeByProject(project.Default, volumeName, db.StoragePoolVolumeTypeCustom, poolID)
+ _, _, err = s.Cluster.GetLocalStoragePoolVolume(project.Default, volumeName, db.StoragePoolVolumeTypeCustom, poolID)
if err != nil {
return errors.Wrapf(err, "Unable to load storage volume %q", target)
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 82cc4ee826..84a0f0b39e 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -953,15 +953,15 @@ func (c *Cluster) storagePoolVolumeGetType(project string, volumeName string, vo
return volumeID, &storageVolume, nil
}
-// StoragePoolNodeVolumeGetTypeByProject gets a single storage volume attached to a
+// GetLocalStoragePoolVolume gets a single storage volume attached to a
// given storage pool of a given type, on the current node in the given project.
-func (c *Cluster) StoragePoolNodeVolumeGetTypeByProject(project, volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
+func (c *Cluster) GetLocalStoragePoolVolume(project, volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
return c.storagePoolVolumeGetType(project, volumeName, volumeType, poolID, c.nodeID)
}
// StoragePoolVolumeUpdateByProject updates the storage volume attached to a given storage pool.
func (c *Cluster) StoragePoolVolumeUpdateByProject(project, volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string) error {
- volumeID, _, err := c.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, poolID)
+ volumeID, _, err := c.GetLocalStoragePoolVolume(project, volumeName, volumeType, poolID)
if err != nil {
return err
}
@@ -994,7 +994,7 @@ func (c *Cluster) StoragePoolVolumeUpdateByProject(project, volumeName string, v
// StoragePoolVolumeDelete deletes the storage volume attached to a given storage
// pool.
func (c *Cluster) StoragePoolVolumeDelete(project, volumeName string, volumeType int, poolID int64) error {
- volumeID, _, err := c.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, poolID)
+ volumeID, _, err := c.GetLocalStoragePoolVolume(project, volumeName, volumeType, poolID)
if err != nil {
return err
}
@@ -1020,7 +1020,7 @@ func (c *Cluster) StoragePoolVolumeDelete(project, volumeName string, volumeType
// StoragePoolVolumeRename renames the storage volume attached to a given storage pool.
func (c *Cluster) StoragePoolVolumeRename(project, oldVolumeName string, newVolumeName string, volumeType int, poolID int64) error {
- volumeID, _, err := c.StoragePoolNodeVolumeGetTypeByProject(project, oldVolumeName, volumeType, poolID)
+ volumeID, _, err := c.GetLocalStoragePoolVolume(project, oldVolumeName, volumeType, poolID)
if err != nil {
return err
}
diff --git a/lxd/db/storage_volume_snapshots.go b/lxd/db/storage_volume_snapshots.go
index 69e2b26565..213dfbb543 100644
--- a/lxd/db/storage_volume_snapshots.go
+++ b/lxd/db/storage_volume_snapshots.go
@@ -89,7 +89,7 @@ func (c *Cluster) StoragePoolVolumeSnapshotCreate(project, volumeName, volumeDes
// StoragePoolVolumeSnapshotUpdateByProject updates the storage volume snapshot attached to a given storage pool.
func (c *Cluster) StoragePoolVolumeSnapshotUpdateByProject(project, volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string, expiryDate time.Time) error {
- volumeID, _, err := c.StoragePoolNodeVolumeGetTypeByProject(project, volumeName, volumeType, poolID)
+ volumeID, _, err := c.GetLocalStoragePoolVolume(project, volumeName, volumeType, poolID)
if err != nil {
return err
}
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index 6209bd9ea6..cd56703fd3 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -314,7 +314,7 @@ func (d *disk) startContainer() (*deviceConfig.RunConfig, error) {
return nil, err
}
- _, volume, err := d.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(storageProjectName, d.config["source"], db.StoragePoolVolumeTypeCustom, poolID)
+ _, volume, err := d.state.Cluster.GetLocalStoragePoolVolume(storageProjectName, d.config["source"], db.StoragePoolVolumeTypeCustom, poolID)
if err != nil {
return nil, err
}
@@ -913,7 +913,7 @@ func (d *disk) storagePoolVolumeAttachShift(projectName, poolName, volumeName st
return err
}
- _, volume, err := d.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volumeName, volumeType, poolID)
+ _, volume, err := d.state.Cluster.GetLocalStoragePoolVolume(projectName, volumeName, volumeType, poolID)
if err != nil {
return err
}
diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index 4ee81d49b8..e8e5be36f8 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -72,7 +72,7 @@ func (s *migrationSourceWs) DoStorage(state *state.State, projectName string, po
poolID, err := state.Cluster.GetStoragePoolID(poolName)
if err == nil {
for _, snap := range snaps {
- _, snapVolume, err := state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, snap.Name, db.StoragePoolVolumeTypeCustom, poolID)
+ _, snapVolume, err := state.Cluster.GetLocalStoragePoolVolume(projectName, snap.Name, db.StoragePoolVolumeTypeCustom, poolID)
if err != nil {
continue
}
diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index d0d021ec04..5b98e9a5d0 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -398,7 +398,7 @@ func (b *lxdBackend) instanceRootVolumeConfig(inst instance.Instance) (map[strin
}
// Get volume config.
- _, vol, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(inst.Project(), inst.Name(), volDBType, b.ID())
+ _, vol, err := b.state.Cluster.GetLocalStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
if err != nil {
if err == db.ErrNoSuchObject {
return nil, fmt.Errorf("Volume doesn't exist")
@@ -1373,7 +1373,7 @@ func (b *lxdBackend) UpdateInstance(inst instance.Instance, newDesc string, newC
}
// Get current config to compare what has changed.
- _, curVol, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(inst.Project(), inst.Name(), volDBType, b.ID())
+ _, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
if err != nil {
if err == db.ErrNoSuchObject {
return fmt.Errorf("Volume doesn't exist")
@@ -2032,7 +2032,7 @@ func (b *lxdBackend) EnsureImage(fingerprint string, op *operations.Operation) e
}
// Try and load any existing volume config on this storage pool so we can compare filesystems if needed.
- _, imgDBVol, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
+ _, imgDBVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
if err != nil {
if err != db.ErrNoSuchObject {
return err
@@ -2104,7 +2104,7 @@ func (b *lxdBackend) DeleteImage(fingerprint string, op *operations.Operation) e
}
// Load the storage volume in order to get the volume config which is needed for some drivers.
- _, storageVol, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
+ _, storageVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
if err != nil {
return err
}
@@ -2131,7 +2131,7 @@ func (b *lxdBackend) DeleteImage(fingerprint string, op *operations.Operation) e
// current volume's config then an error is returned.
func (b *lxdBackend) updateVolumeDescriptionOnly(project, volName string, dbVolType int, newDesc string, newConfig map[string]string) error {
// Get current config to compare what has changed.
- _, curVol, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(project, volName, dbVolType, b.ID())
+ _, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project, volName, dbVolType, b.ID())
if err != nil {
if err == db.ErrNoSuchObject {
return fmt.Errorf("Volume doesn't exist")
@@ -2234,7 +2234,7 @@ func (b *lxdBackend) CreateCustomVolumeFromCopy(projectName string, volName stri
}
// Check source volume exists and is custom type.
- _, srcVolRow, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, srcVolName, db.StoragePoolVolumeTypeCustom, srcPool.ID())
+ _, srcVolRow, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, srcVolName, db.StoragePoolVolumeTypeCustom, srcPool.ID())
if err != nil {
if err == db.ErrNoSuchObject {
return fmt.Errorf("Source volume doesn't exist")
@@ -2587,7 +2587,7 @@ func (b *lxdBackend) UpdateCustomVolume(projectName string, volName string, newD
}
// Get current config to compare what has changed.
- _, curVol, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ _, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != nil {
if err == db.ErrNoSuchObject {
return fmt.Errorf("Volume doesn't exist")
@@ -2659,7 +2659,7 @@ func (b *lxdBackend) UpdateCustomVolumeSnapshot(projectName string, volName stri
}
// Get current config to compare what has changed.
- volID, curVol, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ volID, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != nil {
if err == db.ErrNoSuchObject {
return fmt.Errorf("Volume doesn't exist")
@@ -2756,7 +2756,7 @@ func (b *lxdBackend) MountCustomVolume(projectName, volName string, op *operatio
logger.Debug("MountCustomVolume started")
defer logger.Debug("MountCustomVolume finished")
- _, volume, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
+ _, volume, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
if err != nil {
return false, err
}
@@ -2774,7 +2774,7 @@ func (b *lxdBackend) UnmountCustomVolume(projectName, volName string, op *operat
logger.Debug("UnmountCustomVolume started")
defer logger.Debug("UnmountCustomVolume finished")
- _, volume, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
+ _, volume, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
if err != nil {
return false, err
}
@@ -2803,7 +2803,7 @@ func (b *lxdBackend) CreateCustomVolumeSnapshot(projectName, volName string, new
fullSnapshotName := drivers.GetSnapshotVolumeName(volName, newSnapshotName)
// Check snapshot volume doesn't exist already.
- _, _, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, fullSnapshotName, db.StoragePoolVolumeTypeCustom, b.ID())
+ _, _, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, fullSnapshotName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != db.ErrNoSuchObject {
if err != nil {
return err
@@ -2813,7 +2813,7 @@ func (b *lxdBackend) CreateCustomVolumeSnapshot(projectName, volName string, new
}
// Load parent volume information and check it exists.
- _, parentVol, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ _, parentVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != nil {
if err == db.ErrNoSuchObject {
return fmt.Errorf("Parent volume doesn't exist")
@@ -2952,7 +2952,7 @@ func (b *lxdBackend) RestoreCustomVolume(projectName, volName string, snapshotNa
}
// Get the volume config.
- _, dbVol, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ _, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != nil {
if err == db.ErrNoSuchObject {
return fmt.Errorf("Volume doesn't exist")
@@ -3054,7 +3054,7 @@ func (b *lxdBackend) UpdateInstanceBackupFile(inst instance.Instance, op *operat
contentType := InstanceContentType(inst)
- _, volume, err := b.state.Cluster.StoragePoolNodeVolumeGetTypeByProject(inst.Project(), inst.Name(), volDBType, b.ID())
+ _, volume, err := b.state.Cluster.GetLocalStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
if err != nil {
return err
}
diff --git a/lxd/storage/load.go b/lxd/storage/load.go
index fbe149fc7c..1ccab79df0 100644
--- a/lxd/storage/load.go
+++ b/lxd/storage/load.go
@@ -42,7 +42,7 @@ func volIDFuncMake(state *state.State, poolID int64) func(volType drivers.Volume
projectName, volName = project.StorageVolumeParts(volName)
}
- volID, _, err := state.Cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volName, volTypeID, poolID)
+ volID, _, err := state.Cluster.GetLocalStoragePoolVolume(projectName, volName, volTypeID, poolID)
if err != nil {
if err == db.ErrNoSuchObject {
return -1, fmt.Errorf("Failed to get volume ID for project %q, volume %q, type %q: Volume doesn't exist", projectName, volName, volType)
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index f518e6c962..c14ae149e4 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -237,7 +237,7 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) response.Response {
resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s/%s", version.APIVersion, poolName, apiEndpoint, volume))
} else {
- _, vol, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volume, volumeType, poolID)
+ _, vol, err := d.cluster.GetLocalStoragePoolVolume(projectName, volume, volumeType, poolID)
if err != nil {
continue
}
@@ -304,7 +304,7 @@ func storagePoolVolumesTypePost(d *Daemon, r *http.Request) response.Response {
}
// Check if destination volume exists.
- _, _, err = d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, req.Name, db.StoragePoolVolumeTypeCustom, poolID)
+ _, _, err = d.cluster.GetLocalStoragePoolVolume(projectName, req.Name, db.StoragePoolVolumeTypeCustom, poolID)
if err != db.ErrNoSuchObject {
if err != nil {
return response.SmartError(err)
@@ -409,7 +409,7 @@ func storagePoolVolumesPost(d *Daemon, r *http.Request) response.Response {
}
// Check if destination volume exists.
- _, _, err = d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, req.Name, db.StoragePoolVolumeTypeCustom, poolID)
+ _, _, err = d.cluster.GetLocalStoragePoolVolume(projectName, req.Name, db.StoragePoolVolumeTypeCustom, poolID)
if err != db.ErrNoSuchObject {
if err != nil {
return response.SmartError(err)
@@ -812,7 +812,7 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request, volumeTypeName string)
}
// Get the storage volume.
- _, volume, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volumeName, volumeType, poolID)
+ _, volume, err := d.cluster.GetLocalStoragePoolVolume(projectName, volumeName, volumeType, poolID)
if err != nil {
return response.SmartError(err)
}
@@ -891,7 +891,7 @@ func storagePoolVolumeTypePut(d *Daemon, r *http.Request, volumeTypeName string)
}
// Get the existing storage volume.
- _, vol, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volumeName, volumeType, pool.ID())
+ _, vol, err := d.cluster.GetLocalStoragePoolVolume(projectName, volumeName, volumeType, pool.ID())
if err != nil {
return response.SmartError(err)
}
@@ -1021,7 +1021,7 @@ func storagePoolVolumeTypePatch(d *Daemon, r *http.Request, volumeTypeName strin
}
// Get the existing storage volume.
- _, vol, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volumeName, volumeType, pool.ID())
+ _, vol, err := d.cluster.GetLocalStoragePoolVolume(projectName, volumeName, volumeType, pool.ID())
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/storage_volumes_snapshot.go b/lxd/storage_volumes_snapshot.go
index 5055923120..a61b3760ea 100644
--- a/lxd/storage_volumes_snapshot.go
+++ b/lxd/storage_volumes_snapshot.go
@@ -116,7 +116,7 @@ func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) response.Res
}
// Ensure that the snapshot doesn't already exist.
- _, _, err = d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, fmt.Sprintf("%s/%s", volumeName, req.Name), volumeType, poolID)
+ _, _, err = d.cluster.GetLocalStoragePoolVolume(projectName, fmt.Sprintf("%s/%s", volumeName, req.Name), volumeType, poolID)
if err != db.ErrNoSuchObject {
if err != nil {
return response.SmartError(err)
@@ -126,7 +126,7 @@ func storagePoolVolumeSnapshotsTypePost(d *Daemon, r *http.Request) response.Res
}
// Get the parent volume so we can get the config.
- _, vol, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volumeName, volumeType, poolID)
+ _, vol, err := d.cluster.GetLocalStoragePoolVolume(projectName, volumeName, volumeType, poolID)
if err != nil {
return response.SmartError(err)
}
@@ -214,7 +214,7 @@ func storagePoolVolumeSnapshotsTypeGet(d *Daemon, r *http.Request) response.Resp
}
resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s/%s/snapshots/%s", version.APIVersion, poolName, apiEndpoint, volumeName, snapshotName))
} else {
- _, vol, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, volume.Name, volumeType, poolID)
+ _, vol, err := d.cluster.GetLocalStoragePoolVolume(projectName, volume.Name, volumeType, poolID)
if err != nil {
continue
}
@@ -369,7 +369,7 @@ func storagePoolVolumeSnapshotTypeGet(d *Daemon, r *http.Request) response.Respo
return resp
}
- volID, volume, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, fullSnapshotName, volumeType, poolID)
+ volID, volume, err := d.cluster.GetLocalStoragePoolVolume(projectName, fullSnapshotName, volumeType, poolID)
if err != nil {
return response.SmartError(err)
}
@@ -437,7 +437,7 @@ func storagePoolVolumeSnapshotTypePut(d *Daemon, r *http.Request) response.Respo
return resp
}
- volID, vol, err := d.cluster.StoragePoolNodeVolumeGetTypeByProject(projectName, fullSnapshotName, volumeType, poolID)
+ volID, vol, err := d.cluster.GetLocalStoragePoolVolume(projectName, fullSnapshotName, volumeType, poolID)
if err != nil {
return response.SmartError(err)
}
From 89e8a5b34ad58936e3ad00c01070f71f89320d5d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:49:14 +0100
Subject: [PATCH 38/43] lxd/db: Rename StoragePoolVolumeUpdateByProject to
UpdateStoragePoolVolume
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_pools_test.go | 2 +-
lxd/device/disk.go | 2 +-
lxd/patches.go | 30 +++++++++++++++---------------
lxd/storage/backend_lxd.go | 6 +++---
5 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 84a0f0b39e..135792d154 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -959,8 +959,8 @@ func (c *Cluster) GetLocalStoragePoolVolume(project, volumeName string, volumeTy
return c.storagePoolVolumeGetType(project, volumeName, volumeType, poolID, c.nodeID)
}
-// StoragePoolVolumeUpdateByProject updates the storage volume attached to a given storage pool.
-func (c *Cluster) StoragePoolVolumeUpdateByProject(project, volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string) error {
+// UpdateStoragePoolVolume updates the storage volume attached to a given storage pool.
+func (c *Cluster) UpdateStoragePoolVolume(project, volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string) error {
volumeID, _, err := c.GetLocalStoragePoolVolume(project, volumeName, volumeType, poolID)
if err != nil {
return err
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
index 940ac7f3d6..c8c7b0a171 100644
--- a/lxd/db/storage_pools_test.go
+++ b/lxd/db/storage_pools_test.go
@@ -185,7 +185,7 @@ func TestStoragePoolVolume_Ceph(t *testing.T) {
// Update the volume
config["k"] = "v2"
- err = cluster.StoragePoolVolumeUpdateByProject("default", "v1", 1, poolID, "volume 1", config)
+ err = cluster.UpdateStoragePoolVolume("default", "v1", 1, poolID, "volume 1", config)
require.NoError(t, err)
for _, nodeID := range []int64{1, 2} {
_, volume, err := cluster.StoragePoolVolumeGetType("default", "v1", 1, poolID, nodeID)
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index cd56703fd3..a0104be82d 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -1056,7 +1056,7 @@ func (d *disk) storagePoolVolumeAttachShift(projectName, poolName, volumeName st
// Update last idmap.
poolVolumePut.Config["volatile.idmap.last"] = jsonIdmap
- err = d.state.Cluster.StoragePoolVolumeUpdateByProject(projectName, volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
+ err = d.state.Cluster.UpdateStoragePoolVolume(projectName, volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
if err != nil {
return err
}
diff --git a/lxd/patches.go b/lxd/patches.go
index 40daf6dfe4..5fa6368977 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -564,7 +564,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the container")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
if err != nil {
return err
}
@@ -652,7 +652,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the snapshot")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
if err != nil {
return err
}
@@ -733,7 +733,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the image")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
if err != nil {
return err
}
@@ -854,7 +854,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the container")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
if err != nil {
return err
}
@@ -971,7 +971,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the snapshot")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
if err != nil {
return err
}
@@ -1001,7 +1001,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the image")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
if err != nil {
return err
}
@@ -1163,7 +1163,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the container")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
if err != nil {
return err
}
@@ -1324,7 +1324,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the snapshot")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
if err != nil {
return err
}
@@ -1507,7 +1507,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the image")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
if err != nil {
return err
}
@@ -1699,7 +1699,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the container")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
if err != nil {
return err
}
@@ -1785,7 +1785,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the snapshot")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
if err != nil {
return err
}
@@ -1841,7 +1841,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
_, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the image")
- err := d.cluster.StoragePoolVolumeUpdateByProject("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+ err := d.cluster.UpdateStoragePoolVolume("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
if err != nil {
return err
}
@@ -2385,7 +2385,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
// exist in the db, so it's safe to ignore the error.
volumeType, _ := driver.VolumeTypeNameToType(volume.Type)
// Update the volume config.
- err = d.cluster.StoragePoolVolumeUpdateByProject("default", volume.Name, volumeType, poolID, volume.Description, volume.Config)
+ err = d.cluster.UpdateStoragePoolVolume("default", volume.Name, volumeType, poolID, volume.Description, volume.Config)
if err != nil {
return err
}
@@ -2533,7 +2533,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
// exist in the db, so it's safe to ignore the error.
volumeType, _ := driver.VolumeTypeNameToType(volume.Type)
// Update the volume config.
- err = d.cluster.StoragePoolVolumeUpdateByProject("default", volume.Name, volumeType, poolID, volume.Description, volume.Config)
+ err = d.cluster.UpdateStoragePoolVolume("default", volume.Name, volumeType, poolID, volume.Description, volume.Config)
if err != nil {
return err
}
@@ -2663,7 +2663,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
// exist in the db, so it's safe to ignore the error.
volumeType, _ := driver.VolumeTypeNameToType(volume.Type)
// Update the volume config.
- err = d.cluster.StoragePoolVolumeUpdateByProject("default", volume.Name,
+ err = d.cluster.UpdateStoragePoolVolume("default", volume.Name,
volumeType, poolID, volume.Description,
volume.Config)
if err != nil {
diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 5b98e9a5d0..05925dd8fd 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -1412,7 +1412,7 @@ func (b *lxdBackend) UpdateInstance(inst instance.Instance, newDesc string, newC
// Update the database if something changed.
if len(changedConfig) != 0 || newDesc != curVol.Description {
- err = b.state.Cluster.StoragePoolVolumeUpdateByProject(inst.Project(), inst.Name(), volDBType, b.ID(), newDesc, newConfig)
+ err = b.state.Cluster.UpdateStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID(), newDesc, newConfig)
if err != nil {
return err
}
@@ -2149,7 +2149,7 @@ func (b *lxdBackend) updateVolumeDescriptionOnly(project, volName string, dbVolT
// Update the database if description changed. Use current config.
if newDesc != curVol.Description {
- err = b.state.Cluster.StoragePoolVolumeUpdateByProject(project, volName, dbVolType, b.ID(), newDesc, curVol.Config)
+ err = b.state.Cluster.UpdateStoragePoolVolume(project, volName, dbVolType, b.ID(), newDesc, curVol.Config)
if err != nil {
return err
}
@@ -2638,7 +2638,7 @@ func (b *lxdBackend) UpdateCustomVolume(projectName string, volName string, newD
// Update the database if something changed.
if len(changedConfig) != 0 || newDesc != curVol.Description {
- err = b.state.Cluster.StoragePoolVolumeUpdateByProject(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID(), newDesc, newConfig)
+ err = b.state.Cluster.UpdateStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID(), newDesc, newConfig)
if err != nil {
return err
}
From 4bab94bd01ee9660eeec66f15d01c98b5677fc47 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:50:15 +0100
Subject: [PATCH 39/43] lxd/db: Rename StoragePoolVolumeDelete to
RemoveStoragePoolVolume
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/api_internal.go | 4 ++--
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_pools_test.go | 2 +-
lxd/instance/drivers/driver_lxc.go | 2 +-
lxd/patches.go | 2 +-
lxd/storage/backend_lxd.go | 24 ++++++++++++------------
6 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index ec4c64c71f..cd830d1a7b 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -576,7 +576,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
}
// Remove the storage volume db entry for the instance since force was specified.
- err := d.cluster.StoragePoolVolumeDelete(projectName, req.Name, instanceDBVolType, pool.ID())
+ err := d.cluster.RemoveStoragePoolVolume(projectName, req.Name, instanceDBVolType, pool.ID())
if err != nil {
return response.SmartError(err)
}
@@ -697,7 +697,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
}
if csVolErr == nil {
- err := d.cluster.StoragePoolVolumeDelete(projectName, snap.Name, instanceDBVolType, pool.ID())
+ err := d.cluster.RemoveStoragePoolVolume(projectName, snap.Name, instanceDBVolType, pool.ID())
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 135792d154..193c0d253f 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -991,9 +991,9 @@ func (c *Cluster) UpdateStoragePoolVolume(project, volumeName string, volumeType
return err
}
-// StoragePoolVolumeDelete deletes the storage volume attached to a given storage
+// RemoveStoragePoolVolume deletes the storage volume attached to a given storage
// pool.
-func (c *Cluster) StoragePoolVolumeDelete(project, volumeName string, volumeType int, poolID int64) error {
+func (c *Cluster) RemoveStoragePoolVolume(project, volumeName string, volumeType int, poolID int64) error {
volumeID, _, err := c.GetLocalStoragePoolVolume(project, volumeName, volumeType, poolID)
if err != nil {
return err
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
index c8c7b0a171..dfbe847df2 100644
--- a/lxd/db/storage_pools_test.go
+++ b/lxd/db/storage_pools_test.go
@@ -203,7 +203,7 @@ func TestStoragePoolVolume_Ceph(t *testing.T) {
require.NoError(t, err)
// Delete the volume
- err = cluster.StoragePoolVolumeDelete("default", "v1-new", 1, poolID)
+ err = cluster.RemoveStoragePoolVolume("default", "v1-new", 1, poolID)
require.NoError(t, err)
for _, nodeID := range []int64{1, 2} {
_, volume, err := cluster.StoragePoolVolumeGetType("default", "v1-new", 1, poolID, nodeID)
diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index 5b6557ef2b..97e9501ff4 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -248,7 +248,7 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
pool, err := storagePools.GetPoolByInstance(c.state, c)
if err != nil {
c.Delete()
- s.Cluster.StoragePoolVolumeDelete(args.Project, args.Name, db.StoragePoolVolumeTypeContainer, poolID)
+ s.Cluster.RemoveStoragePoolVolume(args.Project, args.Name, db.StoragePoolVolumeTypeContainer, poolID)
logger.Error("Failed to initialize container storage", ctxMap)
return nil, err
}
diff --git a/lxd/patches.go b/lxd/patches.go
index 5fa6368977..3d6b6e475b 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -1565,7 +1565,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
// This image didn't exist as a logical volume on the
// old LXD instance so we need to kick it from the
// storage volumes database for this pool.
- err := d.cluster.StoragePoolVolumeDelete("default", img, db.StoragePoolVolumeTypeImage, poolID)
+ err := d.cluster.RemoveStoragePoolVolume("default", img, db.StoragePoolVolumeTypeImage, poolID)
if err != nil {
return err
}
diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 05925dd8fd..312a76dee0 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -1311,7 +1311,7 @@ func (b *lxdBackend) DeleteInstance(inst instance.Instance, op *operations.Opera
vol := b.newVolume(volType, contentType, volStorageName, nil)
// Delete the volume from the storage device. Must come after snapshots are removed.
- // Must come before DB StoragePoolVolumeDelete so that the volume ID is still available.
+ // Must come before DB RemoveStoragePoolVolume so that the volume ID is still available.
logger.Debug("Deleting instance volume", log.Ctx{"volName": volStorageName})
if b.driver.HasVolume(vol) {
@@ -1333,7 +1333,7 @@ func (b *lxdBackend) DeleteInstance(inst instance.Instance, op *operations.Opera
}
// Remove the volume record from the database.
- err = b.state.Cluster.StoragePoolVolumeDelete(inst.Project(), inst.Name(), volDBType, b.ID())
+ err = b.state.Cluster.RemoveStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
if err != nil {
return errors.Wrapf(err, "Error deleting storage volume from database")
}
@@ -1811,7 +1811,7 @@ func (b *lxdBackend) DeleteInstanceSnapshot(inst instance.Instance, op *operatio
parentStorageName := project.Instance(inst.Project(), parentName)
// Delete the snapshot from the storage device.
- // Must come before DB StoragePoolVolumeDelete so that the volume ID is still available.
+ // Must come before DB RemoveStoragePoolVolume so that the volume ID is still available.
logger.Debug("Deleting instance snapshot volume", log.Ctx{"volName": parentStorageName, "snapshotName": snapName})
snapVolName := drivers.GetSnapshotVolumeName(parentStorageName, snapName)
@@ -1833,7 +1833,7 @@ func (b *lxdBackend) DeleteInstanceSnapshot(inst instance.Instance, op *operatio
}
// Remove the snapshot volume record from the database.
- err = b.state.Cluster.StoragePoolVolumeDelete(inst.Project(), drivers.GetSnapshotVolumeName(parentName, snapName), volDBType, b.ID())
+ err = b.state.Cluster.RemoveStoragePoolVolume(inst.Project(), drivers.GetSnapshotVolumeName(parentName, snapName), volDBType, b.ID())
if err != nil {
return err
}
@@ -2118,7 +2118,7 @@ func (b *lxdBackend) DeleteImage(fingerprint string, op *operations.Operation) e
}
}
- err = b.state.Cluster.StoragePoolVolumeDelete(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
+ err = b.state.Cluster.RemoveStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
if err != nil {
return err
}
@@ -2192,7 +2192,7 @@ func (b *lxdBackend) CreateCustomVolume(projectName string, volName string, desc
revertDB := true
defer func() {
if revertDB {
- b.state.Cluster.StoragePoolVolumeDelete(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
}
}()
@@ -2277,7 +2277,7 @@ func (b *lxdBackend) CreateCustomVolumeFromCopy(projectName string, volName stri
defer func() {
// Remove any DB volume rows created if we are reverting.
for _, volName := range revertDBVolumes {
- b.state.Cluster.StoragePoolVolumeDelete(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
}
}()
@@ -2429,7 +2429,7 @@ func (b *lxdBackend) CreateCustomVolumeFromMigration(projectName string, conn io
defer func() {
// Remove any DB volume rows created if we are reverting.
for _, volName := range revertDBVolumes {
- b.state.Cluster.StoragePoolVolumeDelete(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
}
}()
@@ -2731,7 +2731,7 @@ func (b *lxdBackend) DeleteCustomVolume(projectName string, volName string, op *
}
// Finally, remove the volume record from the database.
- err = b.state.Cluster.StoragePoolVolumeDelete(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ err = b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != nil {
return err
}
@@ -2831,7 +2831,7 @@ func (b *lxdBackend) CreateCustomVolumeSnapshot(projectName, volName string, new
revertDB := true
defer func() {
if revertDB {
- b.state.Cluster.StoragePoolVolumeDelete(projectName, fullSnapshotName, db.StoragePoolVolumeTypeCustom, b.ID())
+ b.state.Cluster.RemoveStoragePoolVolume(projectName, fullSnapshotName, db.StoragePoolVolumeTypeCustom, b.ID())
}
}()
@@ -2909,7 +2909,7 @@ func (b *lxdBackend) DeleteCustomVolumeSnapshot(projectName, volName string, op
vol := b.newVolume(drivers.VolumeTypeCustom, drivers.ContentTypeFS, volStorageName, nil)
// Delete the snapshot from the storage device.
- // Must come before DB StoragePoolVolumeDelete so that the volume ID is still available.
+ // Must come before DB RemoveStoragePoolVolume so that the volume ID is still available.
if b.driver.HasVolume(vol) {
err := b.driver.DeleteVolumeSnapshot(vol, op)
if err != nil {
@@ -2918,7 +2918,7 @@ func (b *lxdBackend) DeleteCustomVolumeSnapshot(projectName, volName string, op
}
// Remove the snapshot volume record from the database.
- err := b.state.Cluster.StoragePoolVolumeDelete(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ err := b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != nil {
return err
}
From d58de3f8067ffc6d8b45792ad878caab3e9c61dc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:51:03 +0100
Subject: [PATCH 40/43] lxd/db: Rename StoragePoolVolumeRename to
RenameStoragePoolVolume
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_pools_test.go | 2 +-
lxd/storage/backend_lxd.go | 22 +++++++++++-----------
3 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 193c0d253f..53c97a126c 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -1018,8 +1018,8 @@ func (c *Cluster) RemoveStoragePoolVolume(project, volumeName string, volumeType
return err
}
-// StoragePoolVolumeRename renames the storage volume attached to a given storage pool.
-func (c *Cluster) StoragePoolVolumeRename(project, oldVolumeName string, newVolumeName string, volumeType int, poolID int64) error {
+// RenameStoragePoolVolume renames the storage volume attached to a given storage pool.
+func (c *Cluster) RenameStoragePoolVolume(project, oldVolumeName string, newVolumeName string, volumeType int, poolID int64) error {
volumeID, _, err := c.GetLocalStoragePoolVolume(project, oldVolumeName, volumeType, poolID)
if err != nil {
return err
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
index dfbe847df2..3fdb5604cd 100644
--- a/lxd/db/storage_pools_test.go
+++ b/lxd/db/storage_pools_test.go
@@ -193,7 +193,7 @@ func TestStoragePoolVolume_Ceph(t *testing.T) {
assert.Equal(t, "volume 1", volume.Description)
assert.Equal(t, config, volume.Config)
}
- err = cluster.StoragePoolVolumeRename("default", "v1", "v1-new", 1, poolID)
+ err = cluster.RenameStoragePoolVolume("default", "v1", "v1-new", 1, poolID)
require.NoError(t, err)
for _, nodeID := range []int64{1, 2} {
_, volume, err := cluster.StoragePoolVolumeGetType("default", "v1-new", 1, poolID, nodeID)
diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 312a76dee0..dcc23a2c23 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -1196,24 +1196,24 @@ func (b *lxdBackend) RenameInstance(inst instance.Instance, newName string, op *
for _, srcSnapshot := range snapshots {
_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapshot)
newSnapVolName := drivers.GetSnapshotVolumeName(newName, snapName)
- err = b.state.Cluster.StoragePoolVolumeRename(inst.Project(), srcSnapshot, newSnapVolName, volDBType, b.ID())
+ err = b.state.Cluster.RenameStoragePoolVolume(inst.Project(), srcSnapshot, newSnapVolName, volDBType, b.ID())
if err != nil {
return err
}
revert.Add(func() {
- b.state.Cluster.StoragePoolVolumeRename(inst.Project(), newSnapVolName, srcSnapshot, volDBType, b.ID())
+ b.state.Cluster.RenameStoragePoolVolume(inst.Project(), newSnapVolName, srcSnapshot, volDBType, b.ID())
})
}
// Rename the parent volume DB record.
- err = b.state.Cluster.StoragePoolVolumeRename(inst.Project(), inst.Name(), newName, volDBType, b.ID())
+ err = b.state.Cluster.RenameStoragePoolVolume(inst.Project(), inst.Name(), newName, volDBType, b.ID())
if err != nil {
return err
}
revert.Add(func() {
- b.state.Cluster.StoragePoolVolumeRename(inst.Project(), newName, inst.Name(), volDBType, b.ID())
+ b.state.Cluster.RenameStoragePoolVolume(inst.Project(), newName, inst.Name(), volDBType, b.ID())
})
// Rename the volume and its snapshots on the storage device.
@@ -1763,14 +1763,14 @@ func (b *lxdBackend) RenameInstanceSnapshot(inst instance.Instance, newName stri
})
// Rename DB volume record.
- err = b.state.Cluster.StoragePoolVolumeRename(inst.Project(), inst.Name(), newVolName, volDBType, b.ID())
+ err = b.state.Cluster.RenameStoragePoolVolume(inst.Project(), inst.Name(), newVolName, volDBType, b.ID())
if err != nil {
return err
}
revert.Add(func() {
// Rename DB volume record back.
- b.state.Cluster.StoragePoolVolumeRename(inst.Project(), newVolName, inst.Name(), volDBType, b.ID())
+ b.state.Cluster.RenameStoragePoolVolume(inst.Project(), newVolName, inst.Name(), volDBType, b.ID())
})
// Ensure the backup file reflects current config.
@@ -2501,23 +2501,23 @@ func (b *lxdBackend) RenameCustomVolume(projectName string, volName string, newV
for _, srcSnapshot := range snapshots {
_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapshot.Name)
newSnapVolName := drivers.GetSnapshotVolumeName(newVolName, snapName)
- err = b.state.Cluster.StoragePoolVolumeRename(projectName, srcSnapshot.Name, newSnapVolName, db.StoragePoolVolumeTypeCustom, b.ID())
+ err = b.state.Cluster.RenameStoragePoolVolume(projectName, srcSnapshot.Name, newSnapVolName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != nil {
return err
}
revert.Add(func() {
- b.state.Cluster.StoragePoolVolumeRename(projectName, newSnapVolName, srcSnapshot.Name, db.StoragePoolVolumeTypeCustom, b.ID())
+ b.state.Cluster.RenameStoragePoolVolume(projectName, newSnapVolName, srcSnapshot.Name, db.StoragePoolVolumeTypeCustom, b.ID())
})
}
- err = b.state.Cluster.StoragePoolVolumeRename(projectName, volName, newVolName, db.StoragePoolVolumeTypeCustom, b.ID())
+ err = b.state.Cluster.RenameStoragePoolVolume(projectName, volName, newVolName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != nil {
return err
}
revert.Add(func() {
- b.state.Cluster.StoragePoolVolumeRename(projectName, newVolName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
+ b.state.Cluster.RenameStoragePoolVolume(projectName, newVolName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
})
// Get the volume name on storage.
@@ -2876,7 +2876,7 @@ func (b *lxdBackend) RenameCustomVolumeSnapshot(projectName, volName string, new
}
newVolName := drivers.GetSnapshotVolumeName(parentName, newSnapshotName)
- err = b.state.Cluster.StoragePoolVolumeRename(projectName, volName, newVolName, db.StoragePoolVolumeTypeCustom, b.ID())
+ err = b.state.Cluster.RenameStoragePoolVolume(projectName, volName, newVolName, db.StoragePoolVolumeTypeCustom, b.ID())
if err != nil {
// Get the volume name on storage.
newVolStorageName := project.StorageVolume(projectName, newVolName)
From 439473ff3c5d56470973d89d2e7cc2ae788868c8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:52:09 +0100
Subject: [PATCH 41/43] lxd/db: Rename StoragePoolVolumeCreate to
CreateStoragePoolVolume
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/containers_test.go | 2 +-
lxd/db/storage_pools.go | 4 ++--
lxd/db/storage_pools_test.go | 6 +++---
lxd/instance/drivers/driver_lxc.go | 2 +-
lxd/instance/drivers/driver_qemu.go | 2 +-
lxd/patches.go | 16 ++++++++--------
lxd/storage/utils.go | 2 +-
7 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/lxd/db/containers_test.go b/lxd/db/containers_test.go
index bf4c087b89..0aa8336206 100644
--- a/lxd/db/containers_test.go
+++ b/lxd/db/containers_test.go
@@ -355,7 +355,7 @@ func TestGetInstancePool(t *testing.T) {
poolID, err := cluster.CreateStoragePool("default", "", "dir", nil)
require.NoError(t, err)
- _, err = cluster.StoragePoolVolumeCreate("default", "c1", "", db.StoragePoolVolumeTypeContainer, poolID, nil)
+ _, err = cluster.CreateStoragePoolVolume("default", "c1", "", db.StoragePoolVolumeTypeContainer, poolID, nil)
require.NoError(t, err)
err = cluster.Transaction(func(tx *db.ClusterTx) error {
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 53c97a126c..47e86e4f83 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -1074,9 +1074,9 @@ func storagePoolVolumeReplicateIfCeph(tx *sql.Tx, volumeID int64, project, volum
return nil
}
-// StoragePoolVolumeCreate creates a new storage volume attached to a given
+// CreateStoragePoolVolume creates a new storage volume attached to a given
// storage pool.
-func (c *Cluster) StoragePoolVolumeCreate(project, volumeName, volumeDescription string, volumeType int, poolID int64, volumeConfig map[string]string) (int64, error) {
+func (c *Cluster) CreateStoragePoolVolume(project, volumeName, volumeDescription string, volumeType int, poolID int64, volumeConfig map[string]string) (int64, error) {
var thisVolumeID int64
if shared.IsSnapshot(volumeName) {
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
index 3fdb5604cd..15ef1abf2c 100644
--- a/lxd/db/storage_pools_test.go
+++ b/lxd/db/storage_pools_test.go
@@ -168,7 +168,7 @@ func TestStoragePoolVolume_Ceph(t *testing.T) {
require.NoError(t, err)
config := map[string]string{"k": "v"}
- volumeID, err := cluster.StoragePoolVolumeCreate("default", "v1", "", 1, poolID, config)
+ volumeID, err := cluster.CreateStoragePoolVolume("default", "v1", "", 1, poolID, config)
require.NoError(t, err)
// The returned volume ID is the one of the volume created on the local
@@ -213,7 +213,7 @@ func TestStoragePoolVolume_Ceph(t *testing.T) {
}
// Test creating a volume snapshot.
-func TestStoragePoolVolumeCreate_Snapshot(t *testing.T) {
+func TestCreateStoragePoolVolume_Snapshot(t *testing.T) {
cluster, cleanup := db.NewTestCluster(t)
defer cleanup()
@@ -221,7 +221,7 @@ func TestStoragePoolVolumeCreate_Snapshot(t *testing.T) {
require.NoError(t, err)
config := map[string]string{"k": "v"}
- _, err = cluster.StoragePoolVolumeCreate("default", "v1", "", 1, poolID, config)
+ _, err = cluster.CreateStoragePoolVolume("default", "v1", "", 1, poolID, config)
require.NoError(t, err)
config = map[string]string{"k": "v"}
diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index 97e9501ff4..00236f0dcc 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -237,7 +237,7 @@ func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
if c.IsSnapshot() {
_, err = s.Cluster.StoragePoolVolumeSnapshotCreate(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, poolID, volumeConfig, time.Time{})
} else {
- _, err = s.Cluster.StoragePoolVolumeCreate(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, poolID, volumeConfig)
+ _, err = s.Cluster.CreateStoragePoolVolume(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, poolID, volumeConfig)
}
if err != nil {
c.Delete()
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index d45645aaea..3c2c81f83d 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -249,7 +249,7 @@ func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error)
_, err = s.Cluster.StoragePoolVolumeSnapshotCreate(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, poolID, volumeConfig, time.Time{})
} else {
- _, err = s.Cluster.StoragePoolVolumeCreate(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, poolID, volumeConfig)
+ _, err = s.Cluster.CreateStoragePoolVolume(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, poolID, volumeConfig)
}
if err != nil {
return nil, err
diff --git a/lxd/patches.go b/lxd/patches.go
index 3d6b6e475b..486163e058 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -570,7 +570,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
}
} else if err == db.ErrNoSuchObject {
// Insert storage volumes for containers into the database.
- _, err := d.cluster.StoragePoolVolumeCreate("default", ct, "", db.StoragePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+ _, err := d.cluster.CreateStoragePoolVolume("default", ct, "", db.StoragePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
if err != nil {
logger.Errorf("Could not insert a storage volume for container \"%s\"", ct)
return err
@@ -739,7 +739,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
}
} else if err == db.ErrNoSuchObject {
// Insert storage volumes for images into the database.
- _, err := d.cluster.StoragePoolVolumeCreate("default", img, "", db.StoragePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+ _, err := d.cluster.CreateStoragePoolVolume("default", img, "", db.StoragePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
if err != nil {
logger.Errorf("Could not insert a storage volume for image \"%s\"", img)
return err
@@ -860,7 +860,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
}
} else if err == db.ErrNoSuchObject {
// Insert storage volumes for containers into the database.
- _, err := d.cluster.StoragePoolVolumeCreate("default", ct, "", db.StoragePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+ _, err := d.cluster.CreateStoragePoolVolume("default", ct, "", db.StoragePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
if err != nil {
logger.Errorf("Could not insert a storage volume for container \"%s\"", ct)
return err
@@ -1007,7 +1007,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
}
} else if err == db.ErrNoSuchObject {
// Insert storage volumes for images into the database.
- _, err := d.cluster.StoragePoolVolumeCreate("default", img, "", db.StoragePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+ _, err := d.cluster.CreateStoragePoolVolume("default", img, "", db.StoragePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
if err != nil {
logger.Errorf("Could not insert a storage volume for image \"%s\"", img)
return err
@@ -1169,7 +1169,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
}
} else if err == db.ErrNoSuchObject {
// Insert storage volumes for containers into the database.
- _, err := d.cluster.StoragePoolVolumeCreate("default", ct, "", db.StoragePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+ _, err := d.cluster.CreateStoragePoolVolume("default", ct, "", db.StoragePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
if err != nil {
logger.Errorf("Could not insert a storage volume for container \"%s\"", ct)
return err
@@ -1513,7 +1513,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
}
} else if err == db.ErrNoSuchObject {
// Insert storage volumes for images into the database.
- _, err := d.cluster.StoragePoolVolumeCreate("default", img, "", db.StoragePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+ _, err := d.cluster.CreateStoragePoolVolume("default", img, "", db.StoragePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
if err != nil {
logger.Errorf("Could not insert a storage volume for image \"%s\"", img)
return err
@@ -1705,7 +1705,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
}
} else if err == db.ErrNoSuchObject {
// Insert storage volumes for containers into the database.
- _, err := d.cluster.StoragePoolVolumeCreate("default", ct, "", db.StoragePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+ _, err := d.cluster.CreateStoragePoolVolume("default", ct, "", db.StoragePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
if err != nil {
logger.Errorf("Could not insert a storage volume for container \"%s\"", ct)
return err
@@ -1847,7 +1847,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
}
} else if err == db.ErrNoSuchObject {
// Insert storage volumes for images into the database.
- _, err := d.cluster.StoragePoolVolumeCreate("default", img, "", db.StoragePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+ _, err := d.cluster.CreateStoragePoolVolume("default", img, "", db.StoragePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
if err != nil {
logger.Errorf("Could not insert a storage volume for image \"%s\"", img)
return err
diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index a69877042c..2291161dfd 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -159,7 +159,7 @@ func VolumeDBCreate(s *state.State, project, poolName, volumeName, volumeDescrip
if snapshot {
_, err = s.Cluster.StoragePoolVolumeSnapshotCreate(project, volumeName, volumeDescription, volumeType, poolID, volumeConfig, expiryDate)
} else {
- _, err = s.Cluster.StoragePoolVolumeCreate(project, volumeName, volumeDescription, volumeType, poolID, volumeConfig)
+ _, err = s.Cluster.CreateStoragePoolVolume(project, volumeName, volumeDescription, volumeType, poolID, volumeConfig)
}
if err != nil {
return fmt.Errorf("Error inserting %s of type %s into database: %s", poolName, volumeTypeName, err)
From 35e01b23803df4322a0084da4629edc79294f740 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:53:28 +0100
Subject: [PATCH 42/43] lxd/db: Rename StoragePoolNodeVolumeGetTypeIDByProject
to GetStoragePoolNodeVolumeID
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 4 ++--
lxd/patches.go | 24 ++++++++++++------------
lxd/storage/utils.go | 2 +-
lxd/storage_volumes.go | 2 +-
4 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 47e86e4f83..50e2b58071 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -1170,9 +1170,9 @@ SELECT storage_volumes_all.id
return int64(result[0]), nil
}
-// StoragePoolNodeVolumeGetTypeIDByProject gets the ID of a storage volume on a given storage pool
+// GetStoragePoolNodeVolumeID gets the ID of a storage volume on a given storage pool
// of a given storage volume type and project, on the current node.
-func (c *Cluster) StoragePoolNodeVolumeGetTypeIDByProject(projectName string, volumeName string, volumeType int, poolID int64) (int64, error) {
+func (c *Cluster) GetStoragePoolNodeVolumeID(projectName string, volumeName string, volumeType int, poolID int64) (int64, error) {
return c.storagePoolVolumeGetTypeID(projectName, volumeName, volumeType, poolID, c.nodeID)
}
diff --git a/lxd/patches.go b/lxd/patches.go
index 486163e058..db06c6c5e9 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -561,7 +561,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the container")
err := d.cluster.UpdateStoragePoolVolume("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
@@ -649,7 +649,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the snapshot")
err := d.cluster.UpdateStoragePoolVolume("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
@@ -730,7 +730,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the image")
err := d.cluster.UpdateStoragePoolVolume("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
@@ -851,7 +851,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the container")
err := d.cluster.UpdateStoragePoolVolume("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
@@ -968,7 +968,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the snapshot")
err := d.cluster.UpdateStoragePoolVolume("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
@@ -998,7 +998,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the image")
err := d.cluster.UpdateStoragePoolVolume("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
@@ -1160,7 +1160,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the container")
err := d.cluster.UpdateStoragePoolVolume("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
@@ -1321,7 +1321,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the snapshot")
err := d.cluster.UpdateStoragePoolVolume("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
@@ -1504,7 +1504,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the image")
err := d.cluster.UpdateStoragePoolVolume("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
@@ -1696,7 +1696,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, ct, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the container")
err := d.cluster.UpdateStoragePoolVolume("default", ct, db.StoragePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
@@ -1782,7 +1782,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, cs, db.StoragePoolVolumeTypeContainer, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the snapshot")
err := d.cluster.UpdateStoragePoolVolume("default", cs, db.StoragePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
@@ -1838,7 +1838,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
return err
}
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(project.Default, img, db.StoragePoolVolumeTypeImage, poolID)
if err == nil {
logger.Warnf("Storage volumes database already contains an entry for the image")
err := d.cluster.UpdateStoragePoolVolume("default", img, db.StoragePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index 2291161dfd..8d9925606c 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -134,7 +134,7 @@ func VolumeDBCreate(s *state.State, project, poolName, volumeName, volumeDescrip
}
// Check that a storage volume of the same storage volume type does not already exist.
- volumeID, _ := s.Cluster.StoragePoolNodeVolumeGetTypeIDByProject(project, volumeName, volumeType, poolID)
+ volumeID, _ := s.Cluster.GetStoragePoolNodeVolumeID(project, volumeName, volumeType, poolID)
if volumeID > 0 {
return fmt.Errorf("A storage volume of type %s already exists", volumeTypeName)
}
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index c14ae149e4..c3b046a698 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -594,7 +594,7 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request, volumeTypeName string
}
// Check that the name isn't already in use.
- _, err = d.cluster.StoragePoolNodeVolumeGetTypeIDByProject(projectName, req.Name, volumeType, poolID)
+ _, err = d.cluster.GetStoragePoolNodeVolumeID(projectName, req.Name, volumeType, poolID)
if err != db.ErrNoSuchObject {
if err != nil {
return response.InternalError(err)
From 49465a568d7214d7101438d0a1ed557bb1546523 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 6 May 2020 15:55:15 +0100
Subject: [PATCH 43/43] lxd/db: Rename StoragePoolInsertZfsDriver to
FillMissingStoragePoolDriver
Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
lxd/db/storage_pools.go | 6 +++---
lxd/patches.go | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 50e2b58071..d7b243e409 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -1222,9 +1222,9 @@ func storagePoolVolumeTypeToName(volumeType int) (string, error) {
return "", fmt.Errorf("Invalid storage volume type")
}
-// StoragePoolInsertZfsDriver replaces the driver of all storage pools without
-// a driver, setting it to 'zfs'.
-func (c *Cluster) StoragePoolInsertZfsDriver() error {
+// FillMissingStoragePoolDriver fills the driver of all storage pools without a
+// driver, setting it to 'zfs'.
+func (c *Cluster) FillMissingStoragePoolDriver() error {
err := exec(c.db, "UPDATE storage_pools SET driver='zfs', description='' WHERE driver=''")
return err
}
diff --git a/lxd/patches.go b/lxd/patches.go
index db06c6c5e9..616d918e79 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -2544,7 +2544,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
}
func patchStorageApiInsertZfsDriver(name string, d *Daemon) error {
- return d.cluster.StoragePoolInsertZfsDriver()
+ return d.cluster.FillMissingStoragePoolDriver()
}
func patchStorageZFSnoauto(name string, d *Daemon) error {
More information about the lxc-devel
mailing list