[lxc-devel] [lxd/master] [WIP] Clustering

freeekanayaka on Github lxc-bot at linuxcontainers.org
Tue Dec 12 10:00:03 UTC 2017


A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 458 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20171212/ee535dbe/attachment.bin>
-------------- next part --------------
From 27192992058bba26c7e307fe6b26d04ca9d7f160 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 14 Sep 2017 13:13:55 +0000
Subject: [PATCH 001/116] Add raft_nodes table

This new table is meant to hold the addresses of LXD nodes that are
participating in the dqlite raft cluster. Each node in the cluster
will hold its own local copy of this table, regardless of whether it's
a raft node or not.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node/schema.go          |  7 ++++++-
 lxd/db/node/update.go          | 31 +++++++++++++++++++++++++++++++
 lxd/db/node/update_test.go     | 17 +++++++++++++++++
 test/suites/database_update.sh |  2 +-
 4 files changed, 55 insertions(+), 2 deletions(-)
 create mode 100644 lxd/db/node/update_test.go

diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index cbf863e1c..a9754eeaa 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -155,6 +155,11 @@ CREATE TABLE profiles_devices_config (
     UNIQUE (profile_device_id, key),
     FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE
 );
+CREATE TABLE raft_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    address TEXT NOT NULL,
+    UNIQUE (address)
+);
 CREATE TABLE storage_pools (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name VARCHAR(255) NOT NULL,
@@ -188,5 +193,5 @@ CREATE TABLE storage_volumes_config (
     FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
 );
 
-INSERT INTO schema (version, updated_at) VALUES (36, strftime("%s"))
+INSERT INTO schema (version, updated_at) VALUES (37, strftime("%s"))
 `
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 299a645e4..95a660202 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -84,9 +84,40 @@ var updates = map[int]schema.Update{
 	34: updateFromV33,
 	35: updateFromV34,
 	36: updateFromV35,
+	37: updateFromV36,
 }
 
 // Schema updates begin here
+
+// Add a raft_nodes table to be used when running in clustered mode. It lists
+// the current nodes in the LXD cluster that are participating in the dqlite
+// database Raft cluster.
+//
+// The 'id' column contains the raft server ID of the database node, and the
+// 'address' column its network address. Both are used internally by the raft
+// Go package to manage the cluster.
+//
+// Typical setups will have 3 LXD cluster nodes that participate in the dqlite
+// database Raft cluster, and an arbitrary number of additional LXD cluster
+// nodes that don't. Non-database nodes are not tracked in this table, but rather
+// in the nodes table of the cluster database itself.
+//
+// The data in this table must be replicated by LXD on all nodes of the
+// cluster, regardless of whether they are part of the raft cluster or not, and
+// all nodes will consult this table when they need to find out a leader to
+// send SQL queries to.
+func updateFromV36(tx *sql.Tx) error {
+	stmts := `
+CREATE TABLE raft_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    address TEXT NOT NULL,
+    UNIQUE (address)
+);
+`
+	_, err := tx.Exec(stmts)
+	return err
+}
+
 func updateFromV35(tx *sql.Tx) error {
 	stmts := `
 CREATE TABLE tmp (
diff --git a/lxd/db/node/update_test.go b/lxd/db/node/update_test.go
new file mode 100644
index 000000000..980ef8bf3
--- /dev/null
+++ b/lxd/db/node/update_test.go
@@ -0,0 +1,17 @@
+package node_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db/node"
+	"github.com/stretchr/testify/require"
+)
+
+func TestUpdateFromV36(t *testing.T) {
+	schema := node.Schema()
+	db, err := schema.ExerciseUpdate(37, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO raft_nodes VALUES (1, '1.2.3.4:666')")
+	require.NoError(t, err)
+}
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index 7b3737486..15189bd2f 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,7 +9,7 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=23
+  expected_tables=24
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 

From 57ae7e0bb0341a6d2bc71f699c901b474454af37 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 11 Oct 2017 15:05:22 +0000
Subject: [PATCH 002/116] Add query helpers to select and insert complex
 objects

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/query/objects.go      |  85 ++++++++++++++++++++
 lxd/db/query/objects_test.go | 187 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 272 insertions(+)
 create mode 100644 lxd/db/query/objects.go
 create mode 100644 lxd/db/query/objects_test.go

diff --git a/lxd/db/query/objects.go b/lxd/db/query/objects.go
new file mode 100644
index 000000000..f6dcdad09
--- /dev/null
+++ b/lxd/db/query/objects.go
@@ -0,0 +1,85 @@
+package query
+
+import (
+	"database/sql"
+	"fmt"
+	"strings"
+)
+
+// SelectObjects executes a statement which must yield rows with a specific
+// column schema. It invokes the given Dest hook for each yielded row.
+func SelectObjects(tx *sql.Tx, dest Dest, query string, args ...interface{}) error {
+	rows, err := tx.Query(query, args...)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	for i := 0; rows.Next(); i++ {
+		err := rows.Scan(dest(i)...)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = rows.Err()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Dest is a function that is expected to return the objects to pass to the
+// 'dest' argument of sql.Rows.Scan(). It is invoked by SelectObjects once per
+// yielded row, and it will be passed the index of the row being scanned.
+type Dest func(i int) []interface{}
+
+// UpsertObject inserts or replaces a new row with the given column values, to
+// the given table, in the given column order. For example:
+//
+// UpsertObject(tx, "cars", []string{"id", "brand"}, []interface{}{1, "ferrari"})
+//
+// The number of elements in 'columns' must match the one in 'values'.
+func UpsertObject(tx *sql.Tx, table string, columns []string, values []interface{}) (int64, error) {
+	n := len(columns)
+	if n == 0 {
+		return -1, fmt.Errorf("columns length is zero")
+	}
+	if n != len(values) {
+		return -1, fmt.Errorf("columns length does not match values length")
+	}
+
+	stmt := fmt.Sprintf(
+		"INSERT OR REPLACE INTO %s (%s) VALUES %s",
+		table, strings.Join(columns, ", "), exprParams(n))
+	result, err := tx.Exec(stmt, values...)
+	if err != nil {
+		return -1, err
+	}
+	id, err := result.LastInsertId()
+	if err != nil {
+		return -1, err
+	}
+	return id, nil
+}
+
+// DeleteObject removes the row identified by the given ID. The given table
+// must have a primary key column called 'id'.
+//
+// It returns a flag indicating if a matching row was actually found and
+// deleted or not.
+func DeleteObject(tx *sql.Tx, table string, id int64) (bool, error) {
+	stmt := fmt.Sprintf("DELETE FROM %s WHERE id=?", table)
+	result, err := tx.Exec(stmt, id)
+	if err != nil {
+		return false, err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return false, err
+	}
+	if n > 1 {
+		return true, fmt.Errorf("more than one row was deleted")
+	}
+	return n == 1, nil
+}
diff --git a/lxd/db/query/objects_test.go b/lxd/db/query/objects_test.go
new file mode 100644
index 000000000..d6bda9eb2
--- /dev/null
+++ b/lxd/db/query/objects_test.go
@@ -0,0 +1,187 @@
+package query_test
+
+import (
+	"database/sql"
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/mpvl/subtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Exercise possible failure modes.
+func TestSelectObjects_Error(t *testing.T) {
+	cases := []struct {
+		dest  query.Dest
+		query string
+		error string
+	}{
+		{
+			func(int) []interface{} { return nil },
+			"garbage",
+			"near \"garbage\": syntax error",
+		},
+		{
+			func(int) []interface{} { return make([]interface{}, 1) },
+			"SELECT id, name FROM test",
+			"sql: expected 2 destination arguments in Scan, not 1",
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, c.query, func(t *testing.T) {
+			tx := newTxForObjects(t)
+			err := query.SelectObjects(tx, c.dest, c.query)
+			assert.EqualError(t, err, c.error)
+		})
+	}
+}
+
+// Scan rows yielded by the query.
+func TestSelectObjects(t *testing.T) {
+	tx := newTxForObjects(t)
+	objects := make([]struct {
+		ID   int
+		Name string
+	}, 1)
+	object := objects[0]
+
+	dest := func(i int) []interface{} {
+		require.Equal(t, 0, i, "expected at most one row to be yielded")
+		return []interface{}{&object.ID, &object.Name}
+	}
+
+	stmt := "SELECT id, name FROM test WHERE name=?"
+	err := query.SelectObjects(tx, dest, stmt, "bar")
+	require.NoError(t, err)
+
+	assert.Equal(t, 1, object.ID)
+	assert.Equal(t, "bar", object.Name)
+}
+
+// Exercise possible failure modes.
+func TestUpsertObject_Error(t *testing.T) {
+	cases := []struct {
+		columns []string
+		values  []interface{}
+		error   string
+	}{
+		{
+			[]string{},
+			[]interface{}{},
+			"columns length is zero",
+		},
+		{
+			[]string{"id"},
+			[]interface{}{2, "egg"},
+			"columns length does not match values length",
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, c.error, func(t *testing.T) {
+			tx := newTxForObjects(t)
+			id, err := query.UpsertObject(tx, "foo", c.columns, c.values)
+			assert.Equal(t, int64(-1), id)
+			assert.EqualError(t, err, c.error)
+		})
+	}
+}
+
+// Insert a new row.
+func TestUpsertObject_Insert(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	id, err := query.UpsertObject(tx, "test", []string{"name"}, []interface{}{"egg"})
+	require.NoError(t, err)
+	assert.Equal(t, int64(2), id)
+
+	objects := make([]struct {
+		ID   int
+		Name string
+	}, 1)
+	object := objects[0]
+
+	dest := func(i int) []interface{} {
+		require.Equal(t, 0, i, "expected at most one row to be yielded")
+		return []interface{}{&object.ID, &object.Name}
+	}
+
+	stmt := "SELECT id, name FROM test WHERE name=?"
+	err = query.SelectObjects(tx, dest, stmt, "egg")
+	require.NoError(t, err)
+
+	assert.Equal(t, 2, object.ID)
+	assert.Equal(t, "egg", object.Name)
+}
+
+// Update an existing row.
+func TestUpsertObject_Update(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	id, err := query.UpsertObject(tx, "test", []string{"id", "name"}, []interface{}{1, "egg"})
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+
+	objects := make([]struct {
+		ID   int
+		Name string
+	}, 1)
+	object := objects[0]
+
+	dest := func(i int) []interface{} {
+		require.Equal(t, 0, i, "expected at most one row to be yielded")
+		return []interface{}{&object.ID, &object.Name}
+	}
+
+	stmt := "SELECT id, name FROM test WHERE name=?"
+	err = query.SelectObjects(tx, dest, stmt, "egg")
+	require.NoError(t, err)
+
+	assert.Equal(t, 1, object.ID)
+	assert.Equal(t, "egg", object.Name)
+}
+
+// Exercise possible failure modes.
+func TestDeleteObject_Error(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	deleted, err := query.DeleteObject(tx, "foo", 1)
+	assert.False(t, deleted)
+	assert.EqualError(t, err, "no such table: foo")
+}
+
+// If a row was actually deleted, the returned flag is true.
+func TestDeleteObject_Deleted(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	deleted, err := query.DeleteObject(tx, "test", 1)
+	assert.True(t, deleted)
+	assert.NoError(t, err)
+}
+
+// If no row was actually deleted, the returned flag is false.
+func TestDeleteObject_NotDeleted(t *testing.T) {
+	tx := newTxForObjects(t)
+
+	deleted, err := query.DeleteObject(tx, "test", 1000)
+	assert.False(t, deleted)
+	assert.NoError(t, err)
+}
+
+// Return a new transaction against an in-memory SQLite database with a single
+// test table populated with a few rows for testing object-related queries.
+func newTxForObjects(t *testing.T) *sql.Tx {
+	db, err := sql.Open("sqlite3", ":memory:")
+	assert.NoError(t, err)
+
+	_, err = db.Exec("CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT)")
+	assert.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO test VALUES (0, 'foo'), (1, 'bar')")
+	assert.NoError(t, err)
+
+	tx, err := db.Begin()
+	assert.NoError(t, err)
+
+	return tx
+}

From 9e494cc698e6c8aeb3fd1ae6e2c85452734557df Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 15 Sep 2017 07:23:30 +0000
Subject: [PATCH 003/116] Add InsertStrings helper to insert rows with a single
 string value

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/query/slices.go      | 25 +++++++++++++++++++++++++
 lxd/db/query/slices_test.go | 13 +++++++++++++
 2 files changed, 38 insertions(+)

diff --git a/lxd/db/query/slices.go b/lxd/db/query/slices.go
index 4e58126c7..59d0cc892 100644
--- a/lxd/db/query/slices.go
+++ b/lxd/db/query/slices.go
@@ -2,6 +2,8 @@ package query
 
 import (
 	"database/sql"
+	"fmt"
+	"strings"
 )
 
 // SelectStrings executes a statement which must yield rows with a single string
@@ -48,6 +50,29 @@ func SelectIntegers(tx *sql.Tx, query string) ([]int, error) {
 	return values, nil
 }
 
+// InsertStrings inserts a new row for each of the given strings, using the
+// given insert statement template, which must define exactly one insertion
+// column and one substitution placeholder for the values. For example:
+// InsertStrings(tx, "INSERT INTO foo(name) VALUES %s", []string{"bar"}).
+func InsertStrings(tx *sql.Tx, stmt string, values []string) error {
+	n := len(values)
+
+	if n == 0 {
+		return nil
+	}
+
+	params := make([]string, n)
+	args := make([]interface{}, n)
+	for i, value := range values {
+		params[i] = "(?)"
+		args[i] = value
+	}
+
+	stmt = fmt.Sprintf(stmt, strings.Join(params, ", "))
+	_, err := tx.Exec(stmt, args...)
+	return err
+}
+
 // Execute the given query and ensure that it yields rows with a single column
 // of the given database type. For every row yielded, execute the given
 // scanner.
diff --git a/lxd/db/query/slices_test.go b/lxd/db/query/slices_test.go
index f5bb6549a..36e31a5b9 100644
--- a/lxd/db/query/slices_test.go
+++ b/lxd/db/query/slices_test.go
@@ -6,6 +6,7 @@ import (
 
 	_ "github.com/mattn/go-sqlite3"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/subtest"
@@ -51,6 +52,18 @@ func TestIntegers(t *testing.T) {
 	assert.Equal(t, []int{0, 1}, values)
 }
 
+// Insert new rows in bulk.
+func TestInsertStrings(t *testing.T) {
+	tx := newTxForSlices(t)
+
+	err := query.InsertStrings(tx, "INSERT INTO test(name) VALUES %s", []string{"xx", "yy"})
+	require.NoError(t, err)
+
+	values, err := query.SelectStrings(tx, "SELECT name FROM test ORDER BY name DESC LIMIT 2")
+	require.NoError(t, err)
+	assert.Equal(t, values, []string{"yy", "xx"})
+}
+
 // Return a new transaction against an in-memory SQLite database with a single
 // test table populated with a few rows.
 func newTxForSlices(t *testing.T) *sql.Tx {

From a44ee91da86b23581fbaa01206a8754bc7ca038f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sun, 1 Oct 2017 17:13:52 +0000
Subject: [PATCH 004/116] Add util.InMemoryNetwork to create in-memory
 listener/dialer pairs.

This is a convenience for creating in-memory networks that implement
the net.Conn interface. It will be used when running a node in
non-clustered mode, where there will be no actual TCP/gRPC connection
to an external dqlite node, but rather just an in-memory connection to
the local dqlite instance (which will be the leader).

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/util/net.go      | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 lxd/util/net_test.go | 18 ++++++++++++++++++
 2 files changed, 65 insertions(+)

diff --git a/lxd/util/net.go b/lxd/util/net.go
index 1f96f27f4..38d6acb8b 100644
--- a/lxd/util/net.go
+++ b/lxd/util/net.go
@@ -7,6 +7,53 @@ import (
 	"github.com/lxc/lxd/shared"
 )
 
+// InMemoryNetwork creates a fully in-memory listener and dial function.
+//
+// Each time the dial function is invoked a new pair of net.Conn objects will
+// be created using net.Pipe: the listener's Accept method will unblock and
+// return one end of the pipe and the other end will be returned by the dial
+// function.
+func InMemoryNetwork() (net.Listener, func() net.Conn) {
+	listener := &inMemoryListener{conns: make(chan net.Conn, 16)}
+	dialer := func() net.Conn {
+		server, client := net.Pipe()
+		listener.conns <- server
+		return client
+	}
+	return listener, dialer
+}
+
+type inMemoryListener struct {
+	conns chan net.Conn
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (l *inMemoryListener) Accept() (net.Conn, error) {
+	return <-l.conns, nil
+}
+
+// Close closes the listener.
+// Any blocked Accept operations will be unblocked and return errors.
+func (l *inMemoryListener) Close() error {
+	return nil
+}
+
+// Addr returns the listener's network address.
+func (l *inMemoryListener) Addr() net.Addr {
+	return &inMemoryAddr{}
+}
+
+type inMemoryAddr struct {
+}
+
+func (a *inMemoryAddr) Network() string {
+	return "memory"
+}
+
+func (a *inMemoryAddr) String() string {
+	return ""
+}
+
 // CanonicalNetworkAddress parses the given network address and returns a
 // string of the form "host:port", possibly filling it with the default port if
 // it's missing.
diff --git a/lxd/util/net_test.go b/lxd/util/net_test.go
index 0b29eb576..a56581464 100644
--- a/lxd/util/net_test.go
+++ b/lxd/util/net_test.go
@@ -6,8 +6,26 @@ import (
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/mpvl/subtest"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
+// The connection returned by the dialer is paired with the one returned by the
+// Accept() method of the listener.
+func TestInMemoryNetwork(t *testing.T) {
+	listener, dialer := util.InMemoryNetwork()
+	client := dialer()
+	server, err := listener.Accept()
+	require.NoError(t, err)
+
+	go client.Write([]byte("hello"))
+	buffer := make([]byte, 5)
+	n, err := server.Read(buffer)
+	require.NoError(t, err)
+
+	assert.Equal(t, 5, n)
+	assert.Equal(t, []byte("hello"), buffer)
+}
+
 func TestCanonicalNetworkAddress(t *testing.T) {
 	cases := map[string]string{
 		"127.0.0.1":                             "127.0.0.1:8443",

From 6b1567166af7e944d04ff101fba14a063570d9b7 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 11 Sep 2017 12:19:44 +0000
Subject: [PATCH 005/116] Add db.Cluster with basic initialization

A new Cluster structure has been added to the lxd/db sub-package. It
is meant to mediate access to the dqlite-based cluster database. It
uses the go-grpc-sql package to serialize SQL queries over a gRPC
connection against the target dqlite leader node.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/open.go   | 48 ++++++++++++++++++++++++++++++++++
 lxd/db/db.go             | 38 ++++++++++++++++++++++++++-
 lxd/db/db_export_test.go |  9 +++++++
 lxd/db/db_test.go        | 16 ++++++++++++
 lxd/db/testing.go        | 67 ++++++++++++++++++++++++++++++++++++++++++++++++
 lxd/db/transaction.go    | 18 +++++++++++++
 6 files changed, 195 insertions(+), 1 deletion(-)
 create mode 100644 lxd/db/cluster/open.go
 create mode 100644 lxd/db/db_export_test.go

diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
new file mode 100644
index 000000000..d135dea6f
--- /dev/null
+++ b/lxd/db/cluster/open.go
@@ -0,0 +1,48 @@
+package cluster
+
+import (
+	"database/sql"
+	"fmt"
+	"sync/atomic"
+
+	"github.com/CanonicalLtd/go-grpc-sql"
+)
+
+// Open the cluster database object.
+//
+// The name argument is the name of the cluster database. It defaults to
+// 'db.bin', but can be overwritten for testing.
+//
+// The dialer argument is a function that returns a gRPC dialer that can be
+// used to connect to a database node using the gRPC SQL package.
+func Open(name string, dialer grpcsql.Dialer) (*sql.DB, error) {
+	driver := grpcsql.NewDriver(dialer)
+	driverName := grpcSQLDriverName()
+	sql.Register(driverName, driver)
+
+	// Create the cluster db. This won't immediately establish any gRPC
+	// connection, that will happen only when a db transaction is started
+	// (see the database/sql connection pooling code for more details).
+	if name == "" {
+		name = "db.bin"
+	}
+	db, err := sql.Open(driverName, name)
+	if err != nil {
+		return nil, fmt.Errorf("cannot open cluster database: %v", err)
+	}
+
+	return db, nil
+}
+
+// Generate a new name for the grpcsql driver registration. We need it to be
+// unique for testing, see below.
+func grpcSQLDriverName() string {
+	defer atomic.AddUint64(&grpcSQLDriverSerial, 1)
+	return fmt.Sprintf("grpc-%d", grpcSQLDriverSerial)
+}
+
+// Monotonic serial number for registering new instances of grpcsql.Driver
+// using the database/sql stdlib package. This is needed since there's no way
+// to unregister drivers, and in unit tests more than one driver gets
+// registered.
+var grpcSQLDriverSerial uint64
diff --git a/lxd/db/db.go b/lxd/db/db.go
index c43bba0f3..f1eae9653 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -5,8 +5,10 @@ import (
 	"fmt"
 	"time"
 
+	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/mattn/go-sqlite3"
 
+	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/db/node"
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/logger"
@@ -30,7 +32,6 @@ var (
 // Node mediates access to LXD's data stored in the node-local SQLite database.
 type Node struct {
 	db *sql.DB // Handle to the node-local SQLite database file.
-
 }
 
 // OpenNode creates a new Node object.
@@ -111,6 +112,41 @@ func (n *Node) Begin() (*sql.Tx, error) {
 	return begin(n.db)
 }
 
+// Cluster mediates access to LXD's data stored in the cluster dqlite database.
+type Cluster struct {
+	db *sql.DB // Handle to the cluster dqlite database, gated behind gRPC SQL.
+}
+
+// OpenCluster creates a new Cluster object for interacting with the dqlite
+// database.
+func OpenCluster(name string, dialer grpcsql.Dialer) (*Cluster, error) {
+	db, err := cluster.Open(name, dialer)
+	if err != nil {
+		return nil, err
+	}
+	cluster := &Cluster{
+		db: db,
+	}
+	return cluster, nil
+}
+
+// Transaction creates a new ClusterTx object and transactionally executes the
+// cluster database interactions invoked by the given function. If the function
+// returns no error, all database changes are committed to the cluster
+// database, otherwise they are rolled back.
+func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
+	clusterTx := &ClusterTx{}
+	return query.Transaction(c.db, func(tx *sql.Tx) error {
+		clusterTx.tx = tx
+		return f(clusterTx)
+	})
+}
+
+// Close the database facade.
+func (c *Cluster) Close() error {
+	return c.db.Close()
+}
+
 // UpdateSchemasDotGo updates the schema.go files in the local/ and cluster/
 // sub-packages.
 func UpdateSchemasDotGo() error {
diff --git a/lxd/db/db_export_test.go b/lxd/db/db_export_test.go
new file mode 100644
index 000000000..a975c9081
--- /dev/null
+++ b/lxd/db/db_export_test.go
@@ -0,0 +1,9 @@
+package db
+
+import "database/sql"
+
+// DB returns the low level database handle to the cluster gRPC SQL database
+// handler. Used by tests for introspecting the database with raw SQL.
+func (c *Cluster) DB() *sql.DB {
+	return c.db
+}
diff --git a/lxd/db/db_test.go b/lxd/db/db_test.go
index 243d48de0..cf2eeb6df 100644
--- a/lxd/db/db_test.go
+++ b/lxd/db/db_test.go
@@ -23,3 +23,19 @@ func TestNode_Schema(t *testing.T) {
 	assert.NoError(t, rows.Scan(&n))
 	assert.Equal(t, 1, n)
 }
+
+// A gRPC SQL connection is established when starting to interact with the
+// cluster database.
+func TestCluster_Setup(t *testing.T) {
+	cluster, cleanup := db.NewTestCluster(t)
+	defer cleanup()
+
+	db := cluster.DB()
+	rows, err := db.Query("SELECT COUNT(*) FROM sqlite_master")
+	assert.NoError(t, err)
+	defer rows.Close()
+	assert.Equal(t, true, rows.Next())
+	var n uint
+	assert.NoError(t, rows.Scan(&n))
+	assert.Zero(t, n)
+}
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 188e6f630..1cb6344d3 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -2,10 +2,16 @@ package db
 
 import (
 	"io/ioutil"
+	"net"
 	"os"
 	"testing"
+	"time"
 
+	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/mattn/go-sqlite3"
 	"github.com/stretchr/testify/require"
+	"google.golang.org/grpc"
 )
 
 // NewTestNode creates a new Node for testing purposes, along with a function
@@ -43,3 +49,64 @@ func NewTestNodeTx(t *testing.T) (*NodeTx, func()) {
 
 	return nodeTx, cleanup
 }
+
+// NewTestCluster creates a new Cluster for testing purposes, along with a function
+// that can be used to clean it up when done.
+func NewTestCluster(t *testing.T) (*Cluster, func()) {
+	// Create an in-memory gRPC SQL server and dialer.
+	server, dialer := newGrpcServer()
+
+	cluster, err := OpenCluster(":memory:", dialer)
+	require.NoError(t, err)
+
+	cleanup := func() {
+		require.NoError(t, cluster.Close())
+		server.Stop()
+	}
+
+	return cluster, cleanup
+}
+
+// NewTestClusterTx returns a fresh ClusterTx object, along with a function that can
+// be called to cleanup state when done with it.
+func NewTestClusterTx(t *testing.T) (*ClusterTx, func()) {
+	cluster, clusterCleanup := NewTestCluster(t)
+
+	var err error
+
+	clusterTx := &ClusterTx{}
+	clusterTx.tx, err = cluster.db.Begin()
+	require.NoError(t, err)
+
+	cleanup := func() {
+		err := clusterTx.tx.Commit()
+		require.NoError(t, err)
+		clusterCleanup()
+	}
+
+	return clusterTx, cleanup
+}
+
+// Create a new in-memory gRPC server attached to a grpc-sql gateway backed by a
+// SQLite driver.
+//
+// Return the newly created gRPC server and a dialer that can be used to
+// connect to it.
+func newGrpcServer() (*grpc.Server, grpcsql.Dialer) {
+	listener, dial := util.InMemoryNetwork()
+	server := grpcsql.NewServer(&sqlite3.SQLiteDriver{})
+
+	// Setup an in-memory gRPC dialer.
+	options := []grpc.DialOption{
+		grpc.WithInsecure(),
+		grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
+			return dial(), nil
+		}),
+	}
+	dialer := func() (*grpc.ClientConn, error) {
+		return grpc.Dial("", options...)
+	}
+
+	go server.Serve(listener)
+	return server, dialer
+}
diff --git a/lxd/db/transaction.go b/lxd/db/transaction.go
index 4e1d89c66..de30c11f7 100644
--- a/lxd/db/transaction.go
+++ b/lxd/db/transaction.go
@@ -9,3 +9,21 @@ import "database/sql"
 type NodeTx struct {
 	tx *sql.Tx // Handle to a transaction in the node-level SQLite database.
 }
+
+// Tx returns the low level database handle to the node-local SQLite
+// transaction.
+//
+// FIXME: this is a transitional method needed for compatibility with some
+//        legacy call sites. It should be removed when there are no more
+//        consumers.
+func (n *NodeTx) Tx() *sql.Tx {
+	return n.tx
+}
+
+// ClusterTx models a single interaction with a LXD cluster database.
+//
+// It wraps low-level sql.Tx objects and offers a high-level API to fetch and
+// update data.
+type ClusterTx struct {
+	tx *sql.Tx // Handle to a transaction in the cluster dqlite database.
+}

From 7f8882a8db355ec5ee7f8a049ba635d2ec81d240 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 11:38:10 +0000
Subject: [PATCH 006/116] Add cluster.Gateway to manage the lifecycle of the
 cluster database

This is a first version of the Gateway object, an API that the daemon
will use in order to 1) run a dqlite node (if appropriate) 2) connect
to the leader dqlite node via gRPC.

For now there's no actual dqlite plumbing in place, and all the
Gateway does is expose a regular SQLite db over an in-memory gRPC
network (client/server).

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go      | 103 ++++++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/gateway_test.go |  40 +++++++++++++++++
 lxd/db/db.go                |  11 ++++-
 3 files changed, 152 insertions(+), 2 deletions(-)
 create mode 100644 lxd/cluster/gateway.go
 create mode 100644 lxd/cluster/gateway_test.go

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
new file mode 100644
index 000000000..41aee225b
--- /dev/null
+++ b/lxd/cluster/gateway.go
@@ -0,0 +1,103 @@
+package cluster
+
+import (
+	"net"
+	"time"
+
+	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/mattn/go-sqlite3"
+	"google.golang.org/grpc"
+)
+
+// NewGateway creates a new Gateway for managing access to the dqlite cluster.
+//
+// When a new gateway is created, the node-level database is queried to check
+// what kind of role this node plays and if it's exposed over the network. It
+// will initialize internal data structures accordingly, for example starting a
+// dqlite driver if this node is a database node.
+//
+// After creation, the Daemon is expected to expose whatever http handlers the
+// HandlerFuncs method returns and to access the dqlite cluster using the gRPC
+// dialer returned by the Dialer method.
+func NewGateway(db *db.Node, cert *shared.CertInfo, latency float64) (*Gateway, error) {
+	gateway := &Gateway{
+		db:      db,
+		cert:    cert,
+		latency: latency,
+	}
+
+	err := gateway.init()
+	if err != nil {
+		return nil, err
+	}
+
+	return gateway, nil
+}
+
+// Gateway mediates access to the dqlite cluster using a gRPC SQL client, and
+// possibly runs a dqlite replica on this LXD node (if we're configured to do
+// so).
+type Gateway struct {
+	db      *db.Node
+	cert    *shared.CertInfo
+	latency float64
+
+	// The gRPC server exposing the dqlite driver created by this
+	// gateway. It's nil if this LXD node is not supposed to be part of the
+	// raft cluster.
+	server *grpc.Server
+
+	// A dialer that will connect to the gRPC server using an in-memory
+	// net.Conn. It's non-nil when clustering is not enabled on this LXD
+	// node, and so we don't expose any dqlite or raft network endpoint,
+	// but still we want to use dqlite as backend for the "cluster"
+	// database, to minimize the difference between code paths in
+	// clustering and non-clustering modes.
+	memoryDial func() (*grpc.ClientConn, error)
+}
+
+// Dialer returns a gRPC dial function that can be used to connect to one of
+// the dqlite nodes via gRPC.
+func (g *Gateway) Dialer() grpcsql.Dialer {
+	return func() (*grpc.ClientConn, error) {
+		// Memory connection.
+		return g.memoryDial()
+	}
+}
+
+// Shutdown this gateway, stopping the gRPC server and possibly the raft factory.
+func (g *Gateway) Shutdown() error {
+	if g.server != nil {
+		g.server.Stop()
+		// Unset the memory dial, since Shutdown() is also called for
+		// switching between in-memory and network mode.
+		g.memoryDial = nil
+	}
+	return nil
+}
+
+// Initialize the gateway, creating a new raft factory and gRPC server (if this
+// node is a database node), and a gRPC dialer.
+func (g *Gateway) init() error {
+	g.server = grpcsql.NewServer(&sqlite3.SQLiteDriver{})
+	listener, dial := util.InMemoryNetwork()
+	go g.server.Serve(listener)
+	g.memoryDial = grpcMemoryDial(dial)
+	return nil
+}
+
+// Convert a raw in-memory dial function into a gRPC one.
+func grpcMemoryDial(dial func() net.Conn) func() (*grpc.ClientConn, error) {
+	options := []grpc.DialOption{
+		grpc.WithInsecure(),
+		grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
+			return dial(), nil
+		}),
+	}
+	return func() (*grpc.ClientConn, error) {
+		return grpc.Dial("", options...)
+	}
+}
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
new file mode 100644
index 000000000..33072e993
--- /dev/null
+++ b/lxd/cluster/gateway_test.go
@@ -0,0 +1,40 @@
+package cluster_test
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logging"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Basic creation and shutdown. By default, the gateway runs an in-memory gRPC
+// server.
+func TestGateway_Single(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	gateway := newGateway(t, db, cert)
+	defer gateway.Shutdown()
+
+	dialer := gateway.Dialer()
+	conn, err := dialer()
+	assert.NoError(t, err)
+	assert.NotNil(t, conn)
+}
+
+// Create a new test Gateway with the given parameters, and ensure no error
+// happens.
+func newGateway(t *testing.T, db *db.Node, certInfo *shared.CertInfo) *cluster.Gateway {
+	logging.Testing(t)
+	require.NoError(t, os.Mkdir(filepath.Join(db.Dir(), "raft"), 0755))
+	gateway, err := cluster.NewGateway(db, certInfo, 0.2)
+	require.NoError(t, err)
+	return gateway
+}
diff --git a/lxd/db/db.go b/lxd/db/db.go
index f1eae9653..9c6add273 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -31,7 +31,8 @@ var (
 
 // Node mediates access to LXD's data stored in the node-local SQLite database.
 type Node struct {
-	db *sql.DB // Handle to the node-local SQLite database file.
+	db  *sql.DB // Handle to the node-local SQLite database file.
+	dir string  // Reference to the directory where the database file lives.
 }
 
 // OpenNode creates a new Node object.
@@ -55,7 +56,8 @@ func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*Legacy
 	}
 
 	node := &Node{
-		db: db,
+		db:  db,
+		dir: dir,
 	}
 
 	if initial == 0 {
@@ -90,6 +92,11 @@ func (n *Node) DB() *sql.DB {
 	return n.db
 }
 
+// Dir returns the directory of the underlying SQLite database file.
+func (n *Node) Dir() string {
+	return n.dir
+}
+
 // Transaction creates a new NodeTx object and transactionally executes the
 // node-level database interactions invoked by the given function. If the
 // function returns no error, all database changes are committed to the

From 79e3e7df76f08de4e46027003fb309f17080f877 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 12:01:54 +0000
Subject: [PATCH 007/116] Wire cluster.Gateway into Daemon

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go                  | 71 +++++++++++++++++++++++++++++++-----------
 lxd/daemon_integration_test.go |  4 ++-
 lxd/main_daemon.go             |  5 ++-
 3 files changed, 58 insertions(+), 22 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 3a22932cb..6128bb37a 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -25,6 +25,7 @@ import (
 	"gopkg.in/macaroon-bakery.v2/bakery/identchecker"
 	"gopkg.in/macaroon-bakery.v2/httpbakery"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/endpoints"
 	"github.com/lxc/lxd/lxd/state"
@@ -43,6 +44,7 @@ type Daemon struct {
 	clientCerts  []x509.Certificate
 	os           *sys.OS
 	db           *db.Node
+	cluster      *db.Cluster
 	readyChan    chan bool
 	shutdownChan chan bool
 
@@ -56,6 +58,7 @@ type Daemon struct {
 
 	config    *DaemonConfig
 	endpoints *endpoints.Endpoints
+	gateway   *cluster.Gateway
 
 	proxy func(req *http.Request) (*url.URL, error)
 
@@ -69,7 +72,8 @@ type externalAuth struct {
 
 // DaemonConfig holds configuration values for Daemon.
 type DaemonConfig struct {
-	Group string // Group name the local unix socket should be chown'ed to
+	Group       string  // Group name the local unix socket should be chown'ed to
+	RaftLatency float64 // Coarse grain measure of the cluster latency
 }
 
 // NewDaemon returns a new Daemon object with the given configuration.
@@ -80,9 +84,16 @@ func NewDaemon(config *DaemonConfig, os *sys.OS) *Daemon {
 	}
 }
 
+// DefaultDaemonConfig returns a DaemonConfig object with default values.
+func DefaultDaemonConfig() *DaemonConfig {
+	return &DaemonConfig{
+		RaftLatency: 1.0,
+	}
+}
+
 // DefaultDaemon returns a new, un-initialized Daemon object with default values.
 func DefaultDaemon() *Daemon {
-	config := &DaemonConfig{}
+	config := DefaultDaemonConfig()
 	os := sys.DefaultOS()
 	return NewDaemon(config, os)
 }
@@ -362,6 +373,37 @@ func (d *Daemon) init() error {
 		return err
 	}
 
+	/* Setup server certificate */
+	certInfo, err := shared.KeyPairAndCA(d.os.VarDir, "server", shared.CertServer)
+	if err != nil {
+		return err
+	}
+
+	/* Setup dqlite */
+	d.gateway, err = cluster.NewGateway(d.db, certInfo, d.config.RaftLatency)
+	if err != nil {
+		return err
+	}
+
+	/* Setup some mounts (nice to have) */
+	if !d.os.MockMode {
+		// Attempt to mount the shmounts tmpfs
+		setupSharedMounts()
+
+		// Attempt to Mount the devlxd tmpfs
+		devlxd := filepath.Join(d.os.VarDir, "devlxd")
+		if !shared.IsMountPoint(devlxd) {
+			syscall.Mount("tmpfs", devlxd, "tmpfs", 0, "size=100k,mode=0755")
+		}
+	}
+
+	/* Open the cluster database */
+	clusterFilename := filepath.Join(d.os.VarDir, "db.bin")
+	d.cluster, err = db.OpenCluster(clusterFilename, d.gateway.Dialer())
+	if err != nil {
+		return err
+	}
+
 	/* Read the storage pools */
 	err = SetupStorageDriver(d.State(), false)
 	if err != nil {
@@ -396,17 +438,6 @@ func (d *Daemon) init() error {
 		daemonConfig["core.proxy_ignore_hosts"].Get(),
 	)
 
-	/* Setup some mounts (nice to have) */
-	if !d.os.MockMode {
-		// Attempt to mount the shmounts tmpfs
-		setupSharedMounts()
-
-		// Attempt to Mount the devlxd tmpfs
-		if !shared.IsMountPoint(shared.VarPath("devlxd")) {
-			syscall.Mount("tmpfs", shared.VarPath("devlxd"), "tmpfs", 0, "size=100k,mode=0755")
-		}
-	}
-
 	if !d.os.MockMode {
 		/* Start the scheduler */
 		go deviceEventListener(d.State())
@@ -419,11 +450,6 @@ func (d *Daemon) init() error {
 	}
 
 	/* Setup the web server */
-	certInfo, err := shared.KeyPairAndCA(d.os.VarDir, "server", shared.CertServer)
-	if err != nil {
-		return err
-	}
-
 	config := &endpoints.Config{
 		Dir:                  d.os.VarDir,
 		Cert:                 certInfo,
@@ -531,6 +557,15 @@ func (d *Daemon) Stop() error {
 		logger.Infof("Closing the database")
 		trackError(d.db.Close())
 	}
+	if d.cluster != nil {
+		trackError(d.cluster.Close())
+	}
+	if d.gateway != nil {
+		trackError(d.gateway.Shutdown())
+	}
+	if d.endpoints != nil {
+		trackError(d.endpoints.Down())
+	}
 
 	logger.Infof("Saving simplestreams cache")
 	trackError(imageSaveStreamCache(d.os))
diff --git a/lxd/daemon_integration_test.go b/lxd/daemon_integration_test.go
index 79e8700b3..0f689dfa5 100644
--- a/lxd/daemon_integration_test.go
+++ b/lxd/daemon_integration_test.go
@@ -55,7 +55,9 @@ func newDaemon(t *testing.T) (*Daemon, func()) {
 
 // Create a new DaemonConfig object for testing purposes.
 func newConfig() *DaemonConfig {
-	return &DaemonConfig{}
+	return &DaemonConfig{
+		RaftLatency: 0.2,
+	}
 }
 
 // Create a new sys.OS object for testing purposes.
diff --git a/lxd/main_daemon.go b/lxd/main_daemon.go
index 4b0948544..7b9d84372 100644
--- a/lxd/main_daemon.go
+++ b/lxd/main_daemon.go
@@ -38,9 +38,8 @@ func cmdDaemon(args *Args) error {
 		}
 
 	}
-	c := &DaemonConfig{
-		Group: args.Group,
-	}
+	c := DefaultDaemonConfig()
+	c.Group = args.Group
 	d := NewDaemon(c, sys.DefaultOS())
 	err = d.Init()
 	if err != nil {

From fa2b5de57deb423814a45b1864f26db2ab35608b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 20:12:21 +0000
Subject: [PATCH 008/116] Add V1 cluster schema

This is an initial pass at creating the first version of the cluster
database schema.

A new updateFromV0 patch has been added, which for now only creates a
single table ("nodes") for holding the list of all LXD nodes
participating in the cluster.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/open.go        |  9 +++++++++
 lxd/db/cluster/schema.go      | 22 +++++++++++++++++++++
 lxd/db/cluster/update.go      | 46 +++++++++++++++++++++++++++++++++++++++++++
 lxd/db/cluster/update_test.go | 26 ++++++++++++++++++++++++
 lxd/db/db.go                  |  6 +++++-
 5 files changed, 108 insertions(+), 1 deletion(-)
 create mode 100644 lxd/db/cluster/schema.go
 create mode 100644 lxd/db/cluster/update.go
 create mode 100644 lxd/db/cluster/update_test.go

diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index d135dea6f..bf05f8790 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -34,6 +34,15 @@ func Open(name string, dialer grpcsql.Dialer) (*sql.DB, error) {
 	return db, nil
 }
 
+// EnsureSchema applies all relevant schema updates to the cluster database.
+//
+// Return the initial schema version found before starting the update, along
+// with any error occurred.
+func EnsureSchema(db *sql.DB) (int, error) {
+	schema := Schema()
+	return schema.Ensure(db)
+}
+
 // Generate a new name for the grpcsql driver registration. We need it to be
 // unique for testing, see below.
 func grpcSQLDriverName() string {
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
new file mode 100644
index 000000000..90a358e96
--- /dev/null
+++ b/lxd/db/cluster/schema.go
@@ -0,0 +1,22 @@
+package cluster
+
+// DO NOT EDIT BY HAND
+//
+// This code was generated by the schema.DotGo function. If you need to
+// modify the database schema, please add a new schema update to update.go
+// and then run 'make update-schema'.
+const freshSchema = `
+CREATE TABLE nodes (
+    id INTEGER PRIMARY KEY,
+    name TEXT NOT NULL,
+    description TEXT DEFAULT '',
+    address TEXT NOT NULL,
+    schema INTEGER NOT NULL,
+    api_extensions INTEGER NOT NULL,
+    heartbeat DATETIME DEFAULT CURRENT_TIMESTAMP,
+    UNIQUE (name),
+    UNIQUE (address)
+);
+
+INSERT INTO schema (version, updated_at) VALUES (1, strftime("%s"))
+`
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
new file mode 100644
index 000000000..3d43e9b2e
--- /dev/null
+++ b/lxd/db/cluster/update.go
@@ -0,0 +1,46 @@
+package cluster
+
+import (
+	"database/sql"
+
+	"github.com/lxc/lxd/lxd/db/schema"
+)
+
+// Schema for the cluster database.
+func Schema() *schema.Schema {
+	schema := schema.NewFromMap(updates)
+	schema.Fresh(freshSchema)
+	return schema
+}
+
+// SchemaDotGo refreshes the schema.go file in this package, using the updates
+// defined here.
+func SchemaDotGo() error {
+	return schema.DotGo(updates, "schema")
+}
+
+// SchemaVersion is the current version of the cluster database schema.
+var SchemaVersion = len(updates)
+
+var updates = map[int]schema.Update{
+	1: updateFromV0,
+}
+
+func updateFromV0(tx *sql.Tx) error {
+	// v0..v1 the dawn of clustering
+	stmt := `
+CREATE TABLE nodes (
+    id INTEGER PRIMARY KEY,
+    name TEXT NOT NULL,
+    description TEXT DEFAULT '',
+    address TEXT NOT NULL,
+    schema INTEGER NOT NULL,
+    api_extensions INTEGER NOT NULL,
+    heartbeat DATETIME DEFAULT CURRENT_TIMESTAMP,
+    UNIQUE (name),
+    UNIQUE (address)
+);
+`
+	_, err := tx.Exec(stmt)
+	return err
+}
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
new file mode 100644
index 000000000..c80a51574
--- /dev/null
+++ b/lxd/db/cluster/update_test.go
@@ -0,0 +1,26 @@
+package cluster_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/stretchr/testify/require"
+)
+
+func TestUpdateFromV0(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(1, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'foo', 'blah', '1.2.3.4:666', 1, 32, ?)", time.Now())
+	require.NoError(t, err)
+
+	// Unique constraint on name
+	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'foo', 'gosh', '5.6.7.8:666', 5, 20, ?)", time.Now())
+	require.Error(t, err)
+
+	// Unique constraint on address (fixed: stray ')' made this a syntax error)
+	_, err = db.Exec("INSERT INTO nodes VALUES (3, 'bar', 'gasp', '1.2.3.4:666', 9, 11, ?)", time.Now())
+	require.Error(t, err)
+}
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 9c6add273..0bc0d0e39 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -159,7 +159,11 @@ func (c *Cluster) Close() error {
 func UpdateSchemasDotGo() error {
 	err := node.SchemaDotGo()
 	if err != nil {
-		return fmt.Errorf("failed to update local schema.go: %v", err)
+		return fmt.Errorf("failed to update node schema.go: %v", err)
+	}
+	err = cluster.SchemaDotGo()
+	if err != nil {
+		return fmt.Errorf("failed to update cluster schema.go: %v", err)
 	}
 
 	return nil

From a17da23b5e0522fc83fa8bea6a3f5a41bbd68957 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 12:24:49 +0000
Subject: [PATCH 009/116] Wire cluster.EnsureSchema into db.OpenCluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/db.go      | 10 +++++++++-
 lxd/db/db_test.go | 32 +++++++++++++++++++-------------
 2 files changed, 28 insertions(+), 14 deletions(-)

diff --git a/lxd/db/db.go b/lxd/db/db.go
index 0bc0d0e39..f07ced010 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -7,6 +7,7 @@ import (
 
 	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/mattn/go-sqlite3"
+	"github.com/pkg/errors"
 
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/db/node"
@@ -129,11 +130,18 @@ type Cluster struct {
 func OpenCluster(name string, dialer grpcsql.Dialer) (*Cluster, error) {
 	db, err := cluster.Open(name, dialer)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "failed to open database")
+	}
+
+	_, err = cluster.EnsureSchema(db)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to ensure schema")
 	}
+
 	cluster := &Cluster{
 		db: db,
 	}
+
 	return cluster, nil
 }
 
diff --git a/lxd/db/db_test.go b/lxd/db/db_test.go
index cf2eeb6df..33b27c003 100644
--- a/lxd/db/db_test.go
+++ b/lxd/db/db_test.go
@@ -4,7 +4,9 @@ import (
 	"testing"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 // Node database objects automatically initialize their schema as needed.
@@ -15,13 +17,14 @@ func TestNode_Schema(t *testing.T) {
 	// The underlying node-level database has exactly one row in the schema
 	// table.
 	db := node.DB()
-	rows, err := db.Query("SELECT COUNT(*) FROM schema")
-	assert.NoError(t, err)
-	defer rows.Close()
-	assert.Equal(t, true, rows.Next())
-	var n int
-	assert.NoError(t, rows.Scan(&n))
+	tx, err := db.Begin()
+	require.NoError(t, err)
+	n, err := query.Count(tx, "schema", "")
+	require.NoError(t, err)
 	assert.Equal(t, 1, n)
+
+	assert.NoError(t, tx.Commit())
+	assert.NoError(t, db.Close())
 }
 
 // A gRPC SQL connection is established when starting to interact with the
@@ -30,12 +33,15 @@ func TestCluster_Setup(t *testing.T) {
 	cluster, cleanup := db.NewTestCluster(t)
 	defer cleanup()
 
+	// The underlying node-level database has exactly one row in the schema
+	// table.
 	db := cluster.DB()
-	rows, err := db.Query("SELECT COUNT(*) FROM sqlite_master")
-	assert.NoError(t, err)
-	defer rows.Close()
-	assert.Equal(t, true, rows.Next())
-	var n uint
-	assert.NoError(t, rows.Scan(&n))
-	assert.Zero(t, n)
+	tx, err := db.Begin()
+	require.NoError(t, err)
+	n, err := query.Count(tx, "schema", "")
+	require.NoError(t, err)
+	assert.Equal(t, 1, n)
+
+	assert.NoError(t, tx.Commit())
+	assert.NoError(t, db.Close())
 }

From 3cb751fed78c49cfc88bf3f3d2637a8df999bf70 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 13 Oct 2017 10:21:56 +0000
Subject: [PATCH 010/116] Check the versions of other nodes in
 cluster.EnsureSchema

Modify cluster.EnsureSchema to also check that all other nodes in the
cluster have a schema version and an API extensions count that match
those of this node.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go                        |   9 +-
 lxd/db/cluster/open.go               | 124 +++++++++++++++++++++++-
 lxd/db/cluster/open_test.go          | 180 +++++++++++++++++++++++++++++++++++
 lxd/db/cluster/query.go              |  50 ++++++++++
 lxd/db/cluster/schema_export_test.go |   3 +
 lxd/db/db.go                         |  16 +++-
 lxd/db/testing.go                    |   2 +-
 7 files changed, 374 insertions(+), 10 deletions(-)
 create mode 100644 lxd/db/cluster/open_test.go
 create mode 100644 lxd/db/cluster/query.go
 create mode 100644 lxd/db/cluster/schema_export_test.go

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 6128bb37a..ce870ce20 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -19,6 +19,7 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/juju/idmclient"
 	_ "github.com/mattn/go-sqlite3"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"gopkg.in/macaroon-bakery.v2/bakery"
 	"gopkg.in/macaroon-bakery.v2/bakery/checkers"
@@ -397,11 +398,13 @@ func (d *Daemon) init() error {
 		}
 	}
 
+	address := daemonConfig["core.https_address"].Get()
+
 	/* Open the cluster database */
 	clusterFilename := filepath.Join(d.os.VarDir, "db.bin")
-	d.cluster, err = db.OpenCluster(clusterFilename, d.gateway.Dialer())
+	d.cluster, err = db.OpenCluster(clusterFilename, d.gateway.Dialer(), address)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to open cluster database")
 	}
 
 	/* Read the storage pools */
@@ -456,7 +459,7 @@ func (d *Daemon) init() error {
 		RestServer:           RestServer(d),
 		DevLxdServer:         DevLxdServer(d),
 		LocalUnixSocketGroup: d.config.Group,
-		NetworkAddress:       daemonConfig["core.https_address"].Get(),
+		NetworkAddress:       address,
 	}
 	d.endpoints, err = endpoints.Up(config)
 	if err != nil {
diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index bf05f8790..f9b3139e7 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -6,6 +6,9 @@ import (
 	"sync/atomic"
 
 	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/lxc/lxd/lxd/db/schema"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
 )
 
 // Open the cluster database object.
@@ -36,11 +39,58 @@ func Open(name string, dialer grpcsql.Dialer) (*sql.DB, error) {
 
 // EnsureSchema applies all relevant schema updates to the cluster database.
 //
-// Return the initial schema version found before starting the update, along
-// with any error occurred.
-func EnsureSchema(db *sql.DB) (int, error) {
+// Before actually doing anything, this function will make sure that all nodes
+// in the cluster have a schema version and a number of API extensions that
+// match ours. If that's not the case, we either return an error (if some
+// nodes have version greater than us and we need to be upgraded), or return
+// false and no error (if some nodes have a lower version, and we need to wait
+// till they get upgraded and restarted).
+func EnsureSchema(db *sql.DB, address string) (bool, error) {
+	someNodesAreBehind := false
+	apiExtensions := len(version.APIExtensions)
+
+	check := func(current int, tx *sql.Tx) error {
+		// If we're bootstrapping a fresh schema, skip any check, since
+		// it's safe to assume we are the only node.
+		if current == 0 {
+			return nil
+		}
+
+		// Check if we're clustered
+		n, err := selectNodesCount(tx)
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch current nodes count")
+		}
+		if n == 0 {
+			return nil // Nothing to do.
+		}
+
+		// Update the schema and api_extension columns of ourselves.
+		err = updateNodeVersion(tx, address, apiExtensions)
+		if err != nil {
+			return errors.Wrap(err, "failed to update node version")
+
+		}
+
+		err = checkClusterIsUpgradable(tx, [2]int{len(updates), apiExtensions})
+		if err == errSomeNodesAreBehind {
+			someNodesAreBehind = true
+			return schema.ErrGracefulAbort
+		}
+		return err
+	}
+
 	schema := Schema()
-	return schema.Ensure(db)
+	schema.Check(check)
+
+	_, err := schema.Ensure(db)
+	if someNodesAreBehind {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return true, err
 }
 
 // Generate a new name for the grpcsql driver registration. We need it to be
@@ -55,3 +105,69 @@ func grpcSQLDriverName() string {
 // to unregister drivers, and in unit tests more than one driver gets
 // registered.
 var grpcSQLDriverSerial uint64
+
+func checkClusterIsUpgradable(tx *sql.Tx, target [2]int) error {
+	// Get the current versions in the nodes table.
+	versions, err := selectNodesVersions(tx)
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch current nodes versions")
+	}
+
+	for _, version := range versions {
+		n, err := compareVersions(target, version)
+		if err != nil {
+			return err
+		}
+		switch n {
+		case 0:
+			// Versions are equal, there's hope for the
+			// update. Let's check the next node.
+			continue
+		case 1:
+			// Our version is bigger, we should stop here
+			// and wait for other nodes to be upgraded and
+			// restarted.
+			return errSomeNodesAreBehind
+		case 2:
+			// Another node has a version greater than ours
+			// and presumably is waiting for other nodes
+			// to upgrade. Let's error out and shutdown
+			// since we need a greater version.
+			return fmt.Errorf("this node's version is behind, please upgrade")
+		default:
+			// Sanity.
+			panic("unexpected return value from compareVersions")
+		}
+	}
+	return nil
+}
+
+// Compare two nodes versions.
+//
+// A version consists of the version of the node's schema and the number of
+// API extensions it supports.
+//
+// Return 0 if they equal, 1 if the first version is greater than the second
+// and 2 if the second is greater than the first.
+//
+// Return an error if inconsistent versions are detected, for example the first
+// node's schema is greater than the second's, but the number of extensions is
+// smaller.
+func compareVersions(version1, version2 [2]int) (int, error) {
+	schema1, extensions1 := version1[0], version1[1]
+	schema2, extensions2 := version2[0], version2[1]
+
+	if schema1 == schema2 && extensions1 == extensions2 {
+		return 0, nil
+	}
+	if schema1 >= schema2 && extensions1 >= extensions2 {
+		return 1, nil
+	}
+	if schema1 <= schema2 && extensions1 <= extensions2 {
+		return 2, nil
+	}
+
+	return -1, fmt.Errorf("nodes have inconsistent versions")
+}
+
+var errSomeNodesAreBehind = fmt.Errorf("some nodes are behind this node's version")
diff --git a/lxd/db/cluster/open_test.go b/lxd/db/cluster/open_test.go
new file mode 100644
index 000000000..f858d7b35
--- /dev/null
+++ b/lxd/db/cluster/open_test.go
@@ -0,0 +1,180 @@
+package cluster_test
+
+import (
+	"database/sql"
+	"fmt"
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/mpvl/subtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// If the node is not clustered, the schema update works normally.
+func TestEnsureSchema_NoClustered(t *testing.T) {
+	db := newDB(t)
+	ready, err := cluster.EnsureSchema(db, "1.2.3.4:666")
+	assert.True(t, ready)
+	assert.NoError(t, err)
+}
+
+// Exercise EnsureSchema failures when the cluster can't be upgraded right now.
+func TestEnsureSchema_ClusterNotUpgradable(t *testing.T) {
+	schema := cluster.SchemaVersion
+	apiExtensions := len(version.APIExtensions)
+
+	cases := []struct {
+		title string
+		setup func(*testing.T, *sql.DB)
+		ready bool
+		error string
+	}{
+		{
+			`a node's schema version is behind`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema-1, apiExtensions)
+			},
+			false, // The schema was not updated
+			"",    // No error is returned
+		},
+		{
+			`a node's number of API extensions is behind`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema, apiExtensions-1)
+			},
+			false, // The schema was not updated
+			"",    // No error is returned
+		},
+		{
+			`this node's schema is behind`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema+1, apiExtensions)
+			},
+			false,
+			"this node's version is behind, please upgrade",
+		},
+		{
+			`this node's number of API extensions is behind`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema, apiExtensions+1)
+			},
+			false,
+			"this node's version is behind, please upgrade",
+		},
+		{
+			`inconsistent schema version and API extensions number`,
+			func(t *testing.T, db *sql.DB) {
+				addNode(t, db, "1", schema, apiExtensions)
+				addNode(t, db, "2", schema+1, apiExtensions-1)
+			},
+			false,
+			"nodes have inconsistent versions",
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, c.title, func(t *testing.T) {
+			db := newDB(t)
+			c.setup(t, db)
+			ready, err := cluster.EnsureSchema(db, "1")
+			assert.Equal(t, c.ready, ready)
+			if c.error == "" {
+				assert.NoError(t, err)
+			} else {
+				assert.EqualError(t, err, c.error)
+			}
+		})
+	}
+}
+
+// Regardless of whether the schema could actually be upgraded or not, the
+// version of this node gets updated.
+func TestEnsureSchema_UpdateNodeVersion(t *testing.T) {
+	schema := cluster.SchemaVersion
+	apiExtensions := len(version.APIExtensions)
+
+	cases := []struct {
+		setup func(*testing.T, *sql.DB)
+		ready bool
+	}{
+		{
+			func(t *testing.T, db *sql.DB) {},
+			true,
+		},
+		{
+			func(t *testing.T, db *sql.DB) {
+				// Add a node which is behind.
+				addNode(t, db, "2", schema, apiExtensions-1)
+			},
+			true,
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, fmt.Sprintf("%v", c.ready), func(t *testing.T) {
+			db := newDB(t)
+
+			// Add ourselves with an older schema version and API
+			// extensions number.
+			addNode(t, db, "1", schema-1, apiExtensions-1)
+
+			// Ensure the schema.
+			ready, err := cluster.EnsureSchema(db, "1")
+			assert.NoError(t, err)
+			assert.Equal(t, c.ready, ready)
+
+			// Check that the nodes table was updated with our new
+			// schema version and API extensions number.
+			assertNode(t, db, "1", schema, apiExtensions)
+		})
+	}
+}
+
+// Create a new in-memory SQLite database with a fresh cluster schema.
+func newDB(t *testing.T) *sql.DB {
+	db, err := sql.Open("sqlite3", ":memory:")
+	require.NoError(t, err)
+
+	createTableSchema := `
+CREATE TABLE schema (
+    id         INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    version    INTEGER NOT NULL,
+    updated_at DATETIME NOT NULL,
+    UNIQUE (version)
+);
+`
+	_, err = db.Exec(createTableSchema + cluster.FreshSchema)
+	require.NoError(t, err)
+
+	return db
+}
+
+// Add a new node with the given address, schema version and number of api extensions.
+func addNode(t *testing.T, db *sql.DB, address string, schema int, apiExtensions int) {
+	err := query.Transaction(db, func(tx *sql.Tx) error {
+		stmt := `
+INSERT INTO nodes(name, address, schema, api_extensions) VALUES (?, ?, ?, ?)
+`
+		name := fmt.Sprintf("node at %s", address)
+		_, err := tx.Exec(stmt, name, address, schema, apiExtensions)
+		return err
+	})
+	require.NoError(t, err)
+}
+
+// Assert that the node with the given address has the given schema version and API
+// extensions number.
+func assertNode(t *testing.T, db *sql.DB, address string, schema int, apiExtensions int) {
+	err := query.Transaction(db, func(tx *sql.Tx) error {
+		where := "address=? AND schema=? AND api_extensions=?"
+		n, err := query.Count(tx, "nodes", where, address, schema, apiExtensions)
+		assert.Equal(t, 1, n, "node does not have expected version")
+		return err
+	})
+	require.NoError(t, err)
+}
diff --git a/lxd/db/cluster/query.go b/lxd/db/cluster/query.go
new file mode 100644
index 000000000..286ffe2db
--- /dev/null
+++ b/lxd/db/cluster/query.go
@@ -0,0 +1,50 @@
+package cluster
+
+import (
+	"database/sql"
+	"fmt"
+
+	"github.com/lxc/lxd/lxd/db/query"
+)
+
+// Update the schema and api_extensions columns of the row in the nodes table
+// that matches the given id.
+//
+// If no such row is found, an error is returned.
+func updateNodeVersion(tx *sql.Tx, address string, apiExtensions int) error {
+	stmt := "UPDATE nodes SET schema=?, api_extensions=? WHERE address=?"
+	result, err := tx.Exec(stmt, len(updates), apiExtensions, address)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return fmt.Errorf("updated %d rows instead of 1", n)
+	}
+	return nil
+}
+
+// Return the number of rows in the nodes table.
+func selectNodesCount(tx *sql.Tx) (int, error) {
+	return query.Count(tx, "nodes", "")
+}
+
+// Return a slice of binary integer tuples. Each tuple contains the schema
+// version and number of api extensions of a node in the cluster.
+func selectNodesVersions(tx *sql.Tx) ([][2]int, error) {
+	versions := [][2]int{}
+
+	dest := func(i int) []interface{} {
+		versions = append(versions, [2]int{})
+		return []interface{}{&versions[i][0], &versions[i][1]}
+	}
+
+	err := query.SelectObjects(tx, dest, "SELECT schema, api_extensions FROM nodes")
+	if err != nil {
+		return nil, err
+	}
+	return versions, nil
+}
diff --git a/lxd/db/cluster/schema_export_test.go b/lxd/db/cluster/schema_export_test.go
new file mode 100644
index 000000000..d2041016a
--- /dev/null
+++ b/lxd/db/cluster/schema_export_test.go
@@ -0,0 +1,3 @@
+package cluster
+
+var FreshSchema = freshSchema
diff --git a/lxd/db/db.go b/lxd/db/db.go
index f07ced010..6b4a49b6d 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -28,6 +28,8 @@ var (
 	 * already do.
 	 */
 	NoSuchObjectError = fmt.Errorf("No such object")
+
+	Upgrading = fmt.Errorf("The cluster database is upgrading")
 )
 
 // Node mediates access to LXD's data stored in the node-local SQLite database.
@@ -127,13 +129,23 @@ type Cluster struct {
 
 // OpenCluster creates a new Cluster object for interacting with the dqlite
 // database.
-func OpenCluster(name string, dialer grpcsql.Dialer) (*Cluster, error) {
+//
+// - name: Basename of the database file holding the data. Typically "db.bin".
+// - dialer: Function used to connect to the dqlite backend via gRPC SQL.
+// - address: Network address of this node (or empty string).
+// - api: Number of API extensions that this node supports.
+//
+// The address and api parameters will be used to determine if the cluster
+// database matches our version, and possibly trigger a schema update. If the
+// schema update can't be performed right now, because some nodes are still
+// behind, an Upgrading error is returned.
+func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster, error) {
 	db, err := cluster.Open(name, dialer)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to open database")
 	}
 
-	_, err = cluster.EnsureSchema(db)
+	_, err = cluster.EnsureSchema(db, address)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to ensure schema")
 	}
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 1cb6344d3..65c5ddcae 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -56,7 +56,7 @@ func NewTestCluster(t *testing.T) (*Cluster, func()) {
 	// Create an in-memory gRPC SQL server and dialer.
 	server, dialer := newGrpcServer()
 
-	cluster, err := OpenCluster(":memory:", dialer)
+	cluster, err := OpenCluster(":memory:", dialer, "1")
 	require.NoError(t, err)
 
 	cleanup := func() {

From 0479b1cb0d9a56e32c1e0186f578fd251070a6f2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 16:18:14 +0000
Subject: [PATCH 011/116] Rename State.DB to State.Node and add State.Cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container.go             | 34 +++++++++++++++++-----------------
 lxd/container_lxc.go         | 18 +++++++++---------
 lxd/containers.go            |  8 ++++----
 lxd/containers_get.go        |  2 +-
 lxd/daemon.go                |  2 +-
 lxd/devices.go               |  8 ++++----
 lxd/logging.go               |  2 +-
 lxd/networks.go              |  8 ++++----
 lxd/networks_utils.go        |  4 ++--
 lxd/profiles.go              |  6 +++---
 lxd/state/state.go           | 12 +++++++-----
 lxd/storage.go               | 28 ++++++++++++++--------------
 lxd/storage_ceph.go          |  2 +-
 lxd/storage_lvm_utils.go     |  6 +++---
 lxd/storage_pools_utils.go   | 10 +++++-----
 lxd/storage_volumes_utils.go | 16 ++++++++--------
 16 files changed, 84 insertions(+), 82 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 9b9d89258..fb509df76 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -573,7 +573,7 @@ func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (contai
 	// Now create the empty snapshot
 	err = c.Storage().ContainerSnapshotCreateEmpty(c)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -582,7 +582,7 @@ func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (contai
 
 func containerCreateFromImage(s *state.State, args db.ContainerArgs, hash string) (container, error) {
 	// Get the image properties
-	_, img, err := s.DB.ImageGet(hash, false, false)
+	_, img, err := s.Node.ImageGet(hash, false, false)
 	if err != nil {
 		return nil, err
 	}
@@ -603,16 +603,16 @@ func containerCreateFromImage(s *state.State, args db.ContainerArgs, hash string
 		return nil, err
 	}
 
-	err = s.DB.ImageLastAccessUpdate(hash, time.Now().UTC())
+	err = s.Node.ImageLastAccessUpdate(hash, time.Now().UTC())
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, fmt.Errorf("Error updating image last use date: %s", err)
 	}
 
 	// Now create the storage from an image
 	err = c.Storage().ContainerCreateFromImage(c, hash)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -637,7 +637,7 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	if !containerOnly {
 		snapshots, err := sourceContainer.Snapshots()
 		if err != nil {
-			s.DB.ContainerRemove(args.Name)
+			s.Node.ContainerRemove(args.Name)
 			return nil, err
 		}
 
@@ -669,9 +669,9 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	err = ct.Storage().ContainerCopy(ct, sourceContainer, containerOnly)
 	if err != nil {
 		for _, v := range csList {
-			s.DB.ContainerRemove((*v).Name())
+			s.Node.ContainerRemove((*v).Name())
 		}
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -750,7 +750,7 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceCont
 	// Clone the container
 	err = sourceContainer.Storage().ContainerSnapshotCreate(c, sourceContainer)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -813,7 +813,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Validate container devices
-	err = containerValidDevices(s.DB, args.Devices, false, false)
+	err = containerValidDevices(s.Node, args.Devices, false, false)
 	if err != nil {
 		return nil, err
 	}
@@ -829,7 +829,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Validate profiles
-	profiles, err := s.DB.Profiles()
+	profiles, err := s.Node.Profiles()
 	if err != nil {
 		return nil, err
 	}
@@ -841,7 +841,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Create the container entry
-	id, err := s.DB.ContainerCreate(args)
+	id, err := s.Node.ContainerCreate(args)
 	if err != nil {
 		if err == db.DbErrAlreadyDefined {
 			thing := "Container"
@@ -859,9 +859,9 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	args.Id = id
 
 	// Read the timestamp from the database
-	dbArgs, err := s.DB.ContainerGet(args.Name)
+	dbArgs, err := s.Node.ContainerGet(args.Name)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 	args.CreationDate = dbArgs.CreationDate
@@ -870,7 +870,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	// Setup the container struct and finish creation (storage and idmap)
 	c, err := containerLXCCreate(s, args)
 	if err != nil {
-		s.DB.ContainerRemove(args.Name)
+		s.Node.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -925,7 +925,7 @@ func containerConfigureInternal(c container) error {
 
 func containerLoadById(s *state.State, id int) (container, error) {
 	// Get the DB record
-	name, err := s.DB.ContainerName(id)
+	name, err := s.Node.ContainerName(id)
 	if err != nil {
 		return nil, err
 	}
@@ -935,7 +935,7 @@ func containerLoadById(s *state.State, id int) (container, error) {
 
 func containerLoadByName(s *state.State, name string) (container, error) {
 	// Get the DB record
-	args, err := s.DB.ContainerGet(name)
+	args, err := s.Node.ContainerGet(name)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 5851f36a7..7ec0dc567 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -274,7 +274,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	// Create the container struct
 	c := &containerLXC{
 		state:        s,
-		db:           s.DB,
+		db:           s.Node,
 		id:           args.Id,
 		name:         args.Name,
 		description:  args.Description,
@@ -310,7 +310,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 		return nil, err
 	}
 
-	err = containerValidDevices(s.DB, c.expandedDevices, false, true)
+	err = containerValidDevices(s.Node, c.expandedDevices, false, true)
 	if err != nil {
 		c.Delete()
 		logger.Error("Failed creating container", ctxMap)
@@ -332,7 +332,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	storagePool := rootDiskDevice["pool"]
 
 	// Get the storage pool ID for the container
-	poolID, pool, err := s.DB.StoragePoolGet(storagePool)
+	poolID, pool, err := s.Node.StoragePoolGet(storagePool)
 	if err != nil {
 		c.Delete()
 		return nil, err
@@ -346,7 +346,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	}
 
 	// Create a new database entry for the container's storage volume
-	_, err = s.DB.StoragePoolVolumeCreate(args.Name, "", storagePoolVolumeTypeContainer, poolID, volumeConfig)
+	_, err = s.Node.StoragePoolVolumeCreate(args.Name, "", storagePoolVolumeTypeContainer, poolID, volumeConfig)
 	if err != nil {
 		c.Delete()
 		return nil, err
@@ -356,7 +356,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	cStorage, err := storagePoolVolumeContainerCreateInit(s, storagePool, args.Name)
 	if err != nil {
 		c.Delete()
-		s.DB.StoragePoolVolumeDelete(args.Name, storagePoolVolumeTypeContainer, poolID)
+		s.Node.StoragePoolVolumeDelete(args.Name, storagePoolVolumeTypeContainer, poolID)
 		logger.Error("Failed to initialize container storage", ctxMap)
 		return nil, err
 	}
@@ -442,7 +442,7 @@ func containerLXCLoad(s *state.State, args db.ContainerArgs) (container, error)
 	// Create the container struct
 	c := &containerLXC{
 		state:        s,
-		db:           s.DB,
+		db:           s.Node,
 		id:           args.Id,
 		name:         args.Name,
 		description:  args.Description,
@@ -728,7 +728,7 @@ func findIdmap(state *state.State, cName string, isolatedStr string, configBase
 	idmapLock.Lock()
 	defer idmapLock.Unlock()
 
-	cs, err := state.DB.ContainersList(db.CTypeRegular)
+	cs, err := state.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -3350,12 +3350,12 @@ func writeBackupFile(c container) error {
 	}
 
 	s := c.DaemonState()
-	poolID, pool, err := s.DB.StoragePoolGet(poolName)
+	poolID, pool, err := s.Node.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
-	_, volume, err := s.DB.StoragePoolVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
+	_, volume, err := s.Node.StoragePoolVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/containers.go b/lxd/containers.go
index 0ab650cd1..e95d579ed 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -106,7 +106,7 @@ func (slice containerAutostartList) Swap(i, j int) {
 
 func containersRestart(s *state.State) error {
 	// Get all the containers
-	result, err := s.DB.ContainersList(db.CTypeRegular)
+	result, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
@@ -153,13 +153,13 @@ func containersShutdown(s *state.State) error {
 	var wg sync.WaitGroup
 
 	// Get all the containers
-	results, err := s.DB.ContainersList(db.CTypeRegular)
+	results, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
 
 	// Reset all container states
-	err = s.DB.ContainersResetState()
+	err = s.Node.ContainersResetState()
 	if err != nil {
 		return err
 	}
@@ -207,7 +207,7 @@ func containerDeleteSnapshots(s *state.State, cname string) error {
 	logger.Debug("containerDeleteSnapshots",
 		log.Ctx{"container": cname})
 
-	results, err := s.DB.ContainerGetSnapshots(cname)
+	results, err := s.Node.ContainerGetSnapshots(cname)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index b86dbb336..9ae37928b 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -34,7 +34,7 @@ func containersGet(d *Daemon, r *http.Request) Response {
 }
 
 func doContainersGet(s *state.State, recursion bool) (interface{}, error) {
-	result, err := s.DB.ContainersList(db.CTypeRegular)
+	result, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index ce870ce20..170192abd 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -184,7 +184,7 @@ func isJSONRequest(r *http.Request) bool {
 
 // State creates a new State instance liked to our internal db and os.
 func (d *Daemon) State() *state.State {
-	return state.NewState(d.db, d.os)
+	return state.NewState(d.db, d.cluster, d.os)
 }
 
 // UnixSocket returns the full path to the unix.socket file that this daemon is
diff --git a/lxd/devices.go b/lxd/devices.go
index f29b2df24..917b69422 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -604,7 +604,7 @@ func deviceTaskBalance(s *state.State) {
 	}
 
 	// Iterate through the containers
-	containers, err := s.DB.ContainersList(db.CTypeRegular)
+	containers, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Error("problem loading containers list", log.Ctx{"err": err})
 		return
@@ -730,7 +730,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
 		return
 	}
 
-	containers, err := s.DB.ContainersList(db.CTypeRegular)
+	containers, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return
 	}
@@ -761,7 +761,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
 }
 
 func deviceUSBEvent(s *state.State, usb usbDevice) {
-	containers, err := s.DB.ContainersList(db.CTypeRegular)
+	containers, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Error("problem loading containers list", log.Ctx{"err": err})
 		return
@@ -847,7 +847,7 @@ func deviceEventListener(s *state.State) {
 
 			logger.Debugf("Scheduler: network: %s has been added: updating network priorities", e[0])
 			deviceNetworkPriority(s, e[0])
-			networkAutoAttach(s.DB, e[0])
+			networkAutoAttach(s.Node, e[0])
 		case e := <-chUSB:
 			deviceUSBEvent(s, e)
 		case e := <-deviceSchedRebalance:
diff --git a/lxd/logging.go b/lxd/logging.go
index 6587149cd..8a0856f13 100644
--- a/lxd/logging.go
+++ b/lxd/logging.go
@@ -41,7 +41,7 @@ func expireLogs(ctx context.Context, state *state.State) error {
 	var containers []string
 	ch := make(chan struct{})
 	go func() {
-		containers, err = state.DB.ContainersList(db.CTypeRegular)
+		containers, err = state.Node.ContainersList(db.CTypeRegular)
 		ch <- struct{}{}
 	}()
 	select {
diff --git a/lxd/networks.go b/lxd/networks.go
index 7e78acea2..76234965d 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -392,19 +392,19 @@ var networkCmd = Command{name: "networks/{name}", get: networkGet, delete: netwo
 
 // The network structs and functions
 func networkLoadByName(s *state.State, name string) (*network, error) {
-	id, dbInfo, err := s.DB.NetworkGet(name)
+	id, dbInfo, err := s.Node.NetworkGet(name)
 	if err != nil {
 		return nil, err
 	}
 
-	n := network{db: s.DB, state: s, id: id, name: name, description: dbInfo.Description, config: dbInfo.Config}
+	n := network{db: s.Node, state: s, id: id, name: name, description: dbInfo.Description, config: dbInfo.Config}
 
 	return &n, nil
 }
 
 func networkStartup(s *state.State) error {
 	// Get a list of managed networks
-	networks, err := s.DB.Networks()
+	networks, err := s.Node.Networks()
 	if err != nil {
 		return err
 	}
@@ -428,7 +428,7 @@ func networkStartup(s *state.State) error {
 
 func networkShutdown(s *state.State) error {
 	// Get a list of managed networks
-	networks, err := s.DB.Networks()
+	networks, err := s.Node.Networks()
 	if err != nil {
 		return err
 	}
diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index 20b9b9024..d10b4b00e 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -744,7 +744,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 	defer networkStaticLock.Unlock()
 
 	// Get all the containers
-	containers, err := s.DB.ContainersList(db.CTypeRegular)
+	containers, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
@@ -753,7 +753,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 	var networks []string
 	if networkName == "" {
 		var err error
-		networks, err = s.DB.Networks()
+		networks, err = s.Node.Networks()
 		if err != nil {
 			return err
 		}
diff --git a/lxd/profiles.go b/lxd/profiles.go
index 6b36e2203..e92a08034 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -105,12 +105,12 @@ var profilesCmd = Command{
 	post: profilesPost}
 
 func doProfileGet(s *state.State, name string) (*api.Profile, error) {
-	_, profile, err := s.DB.ProfileGet(name)
+	_, profile, err := s.Node.ProfileGet(name)
 	if err != nil {
 		return nil, err
 	}
 
-	cts, err := s.DB.ProfileContainersGet(name)
+	cts, err := s.Node.ProfileContainersGet(name)
 	if err != nil {
 		return nil, err
 	}
@@ -139,7 +139,7 @@ func profileGet(d *Daemon, r *http.Request) Response {
 func getContainersWithProfile(s *state.State, profile string) []container {
 	results := []container{}
 
-	output, err := s.DB.ProfileContainersGet(profile)
+	output, err := s.Node.ProfileContainersGet(profile)
 	if err != nil {
 		return results
 	}
diff --git a/lxd/state/state.go b/lxd/state/state.go
index 62b0afd72..dc49a823b 100644
--- a/lxd/state/state.go
+++ b/lxd/state/state.go
@@ -9,15 +9,17 @@ import (
 // and the operating system. It's typically used by model entities such as
 // containers, volumes, etc. in order to perform changes.
 type State struct {
-	DB *db.Node
-	OS *sys.OS
+	Node    *db.Node
+	Cluster *db.Cluster
+	OS      *sys.OS
 }
 
 // NewState returns a new State object with the given database and operating
 // system components.
-func NewState(db *db.Node, os *sys.OS) *State {
+func NewState(node *db.Node, cluster *db.Cluster, os *sys.OS) *State {
 	return &State{
-		DB: db,
-		OS: os,
+		Node:    node,
+		Cluster: cluster,
+		OS:      os,
 	}
 }
diff --git a/lxd/storage.go b/lxd/storage.go
index aaf581c0a..582bd6403 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -284,7 +284,7 @@ func storageCoreInit(driver string) (storage, error) {
 
 func storageInit(s *state.State, poolName string, volumeName string, volumeType int) (storage, error) {
 	// Load the storage pool.
-	poolID, pool, err := s.DB.StoragePoolGet(poolName)
+	poolID, pool, err := s.Node.StoragePoolGet(poolName)
 	if err != nil {
 		return nil, err
 	}
@@ -299,7 +299,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 	// Load the storage volume.
 	volume := &api.StorageVolume{}
 	if volumeName != "" && volumeType >= 0 {
-		_, volume, err = s.DB.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+		_, volume, err = s.Node.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 		if err != nil {
 			return nil, err
 		}
@@ -317,7 +317,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		btrfs.pool = pool
 		btrfs.volume = volume
 		btrfs.s = s
-		btrfs.db = s.DB
+		btrfs.db = s.Node
 		err = btrfs.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -329,7 +329,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		dir.pool = pool
 		dir.volume = volume
 		dir.s = s
-		dir.db = s.DB
+		dir.db = s.Node
 		err = dir.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -341,7 +341,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		ceph.pool = pool
 		ceph.volume = volume
 		ceph.s = s
-		ceph.db = s.DB
+		ceph.db = s.Node
 		err = ceph.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -353,7 +353,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		lvm.pool = pool
 		lvm.volume = volume
 		lvm.s = s
-		lvm.db = s.DB
+		lvm.db = s.Node
 		err = lvm.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -365,7 +365,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		mock.pool = pool
 		mock.volume = volume
 		mock.s = s
-		mock.db = s.DB
+		mock.db = s.Node
 		err = mock.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -377,7 +377,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		zfs.pool = pool
 		zfs.volume = volume
 		zfs.s = s
-		zfs.db = s.DB
+		zfs.db = s.Node
 		err = zfs.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -518,11 +518,11 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 
 	st.SetStoragePoolVolumeWritable(&poolVolumePut)
 
-	poolID, err := s.DB.StoragePoolGetID(poolName)
+	poolID, err := s.Node.StoragePoolGetID(poolName)
 	if err != nil {
 		return nil, err
 	}
-	err = s.DB.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
+	err = s.Node.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
 	if err != nil {
 		return nil, err
 	}
@@ -545,7 +545,7 @@ func storagePoolVolumeContainerCreateInit(s *state.State, poolName string, conta
 
 func storagePoolVolumeContainerLoadInit(s *state.State, containerName string) (storage, error) {
 	// Get the storage pool of a given container.
-	poolName, err := s.DB.ContainerPool(containerName)
+	poolName, err := s.Node.ContainerPool(containerName)
 	if err != nil {
 		return nil, err
 	}
@@ -811,7 +811,7 @@ func StorageProgressWriter(op *operation, key string, description string) func(i
 }
 
 func SetupStorageDriver(s *state.State, forceCheck bool) error {
-	pools, err := s.DB.StoragePools()
+	pools, err := s.Node.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			logger.Debugf("No existing storage pools detected.")
@@ -828,7 +828,7 @@ func SetupStorageDriver(s *state.State, forceCheck bool) error {
 	// but the upgrade somehow got messed up then there will be no
 	// "storage_api" entry in the db.
 	if len(pools) > 0 && !forceCheck {
-		appliedPatches, err := s.DB.Patches()
+		appliedPatches, err := s.Node.Patches()
 		if err != nil {
 			return err
 		}
@@ -864,7 +864,7 @@ func SetupStorageDriver(s *state.State, forceCheck bool) error {
 	// appropriate. (Should be cheaper then querying the db all the time,
 	// especially if we keep adding more storage drivers.)
 	if !storagePoolDriversCacheInitialized {
-		tmp, err := s.DB.StoragePoolsGetDrivers()
+		tmp, err := s.Node.StoragePoolsGetDrivers()
 		if err != nil && err != db.NoSuchObjectError {
 			return nil
 		}
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 1af3cc410..0c6c7f0ba 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -972,7 +972,7 @@ func (s *storageCeph) ContainerCreateFromImage(container container, fingerprint
 			fingerprint, storagePoolVolumeTypeNameImage, s.UserName)
 
 		if ok {
-			_, volume, err := s.s.DB.StoragePoolVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Node.StoragePoolVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index f9f3f1340..261e457b9 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -497,7 +497,7 @@ func (s *storageLvm) containerCreateFromImageThinLv(c container, fp string) erro
 		var imgerr error
 		ok, _ := storageLVExists(imageLvmDevPath)
 		if ok {
-			_, volume, err := s.s.DB.StoragePoolVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Node.StoragePoolVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
@@ -684,7 +684,7 @@ func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) {
 func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
 	results := []string{}
 
-	cNames, err := s.DB.ContainersList(db.CTypeRegular)
+	cNames, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return results, err
 	}
@@ -702,7 +702,7 @@ func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
 		}
 	}
 
-	imageNames, err := s.DB.ImagesGet(false)
+	imageNames, err := s.Node.ImagesGet(false)
 	if err != nil {
 		return results, err
 	}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 1059d3765..849100675 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -62,7 +62,7 @@ func storagePoolUpdate(state *state.State, name, newDescription string, newConfi
 
 	// Update the database if something changed
 	if len(changedConfig) != 0 || newDescription != oldDescription {
-		err = state.DB.StoragePoolUpdate(name, newDescription, newConfig)
+		err = state.Node.StoragePoolUpdate(name, newDescription, newConfig)
 		if err != nil {
 			return err
 		}
@@ -164,7 +164,7 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	}
 
 	// Check that the storage pool does not already exist.
-	_, err = s.DB.StoragePoolGetID(poolName)
+	_, err = s.Node.StoragePoolGetID(poolName)
 	if err == nil {
 		return fmt.Errorf("The storage pool already exists")
 	}
@@ -187,7 +187,7 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	}
 
 	// Create the database entry for the storage pool.
-	_, err = dbStoragePoolCreateAndUpdateCache(s.DB, poolName, poolDescription, driver, config)
+	_, err = dbStoragePoolCreateAndUpdateCache(s.Node, poolName, poolDescription, driver, config)
 	if err != nil {
 		return fmt.Errorf("Error inserting %s into database: %s", poolName, err)
 	}
@@ -209,7 +209,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 		if !tryUndo {
 			return
 		}
-		dbStoragePoolDeleteAndUpdateCache(state.DB, poolName)
+		dbStoragePoolDeleteAndUpdateCache(state.Node, poolName)
 	}()
 
 	s, err := storagePoolInit(state, poolName)
@@ -238,7 +238,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 	configDiff, _ := storageConfigDiff(config, postCreateConfig)
 	if len(configDiff) > 0 {
 		// Create the database entry for the storage pool.
-		err = state.DB.StoragePoolUpdate(poolName, poolDescription, postCreateConfig)
+		err = state.Node.StoragePoolUpdate(poolName, poolDescription, postCreateConfig)
 		if err != nil {
 			return fmt.Errorf("Error inserting %s into database: %s", poolName, err)
 		}
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index 7e690e60b..c79b1e461 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -151,14 +151,14 @@ func storagePoolVolumeUpdate(state *state.State, poolName string, volumeName str
 		s.SetStoragePoolVolumeWritable(&newWritable)
 	}
 
-	poolID, err := state.DB.StoragePoolGetID(poolName)
+	poolID, err := state.Node.StoragePoolGetID(poolName)
 	if err != nil {
 		return err
 	}
 
 	// Update the database if something changed
 	if len(changedConfig) != 0 || newDescription != oldDescription {
-		err = state.DB.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, newDescription, newConfig)
+		err = state.Node.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, newDescription, newConfig)
 		if err != nil {
 			return err
 		}
@@ -172,7 +172,7 @@ func storagePoolVolumeUpdate(state *state.State, poolName string, volumeName str
 
 func storagePoolVolumeUsedByContainersGet(s *state.State, volumeName string,
 	volumeTypeName string) ([]string, error) {
-	cts, err := s.DB.ContainersList(db.CTypeRegular)
+	cts, err := s.Node.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return []string{}, err
 	}
@@ -233,7 +233,7 @@ func storagePoolVolumeUsedByGet(s *state.State, volumeName string, volumeTypeNam
 			fmt.Sprintf("/%s/containers/%s", version.APIVersion, ct))
 	}
 
-	profiles, err := profilesUsingPoolVolumeGetNames(s.DB, volumeName, volumeTypeName)
+	profiles, err := profilesUsingPoolVolumeGetNames(s.Node, volumeName, volumeTypeName)
 	if err != nil {
 		return []string{}, err
 	}
@@ -302,14 +302,14 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 	}
 
 	// Load storage pool the volume will be attached to.
-	poolID, poolStruct, err := s.DB.StoragePoolGet(poolName)
+	poolID, poolStruct, err := s.Node.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
 	// Check that a storage volume of the same storage volume type does not
 	// already exist.
-	volumeID, _ := s.DB.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
+	volumeID, _ := s.Node.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
 	if volumeID > 0 {
 		return fmt.Errorf("a storage volume of type %s does already exist", volumeTypeName)
 	}
@@ -331,7 +331,7 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 	}
 
 	// Create the database entry for the storage volume.
-	_, err = s.DB.StoragePoolVolumeCreate(volumeName, volumeDescription, volumeType, poolID, volumeConfig)
+	_, err = s.Node.StoragePoolVolumeCreate(volumeName, volumeDescription, volumeType, poolID, volumeConfig)
 	if err != nil {
 		return fmt.Errorf("Error inserting %s of type %s into database: %s", poolName, volumeTypeName, err)
 	}
@@ -361,7 +361,7 @@ func storagePoolVolumeCreateInternal(state *state.State, poolName string, volume
 	// Create storage volume.
 	err = s.StoragePoolVolumeCreate()
 	if err != nil {
-		state.DB.StoragePoolVolumeDelete(volumeName, volumeType, poolID)
+		state.Node.StoragePoolVolumeDelete(volumeName, volumeType, poolID)
 		return err
 	}
 

From ed39c211d43cab5a15f399dd9a68c650ca197c90 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 16:52:25 +0000
Subject: [PATCH 012/116] Add testing facilities for state.State and sys.OS

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/state/testing.go | 29 +++++++++++++++++++++++++++++
 lxd/sys/testing.go   | 28 ++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)
 create mode 100644 lxd/state/testing.go
 create mode 100644 lxd/sys/testing.go

diff --git a/lxd/state/testing.go b/lxd/state/testing.go
new file mode 100644
index 000000000..f49cebd09
--- /dev/null
+++ b/lxd/state/testing.go
@@ -0,0 +1,29 @@
+package state
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/sys"
+)
+
+// NewTestState returns a State object initialized with testable instances of
+// the node/cluster databases and of the OS facade.
+//
+// Return the newly created State object, along with a function that can be
+// used for cleaning it up.
+func NewTestState(t *testing.T) (*State, func()) {
+	node, nodeCleanup := db.NewTestNode(t)
+	cluster, clusterCleanup := db.NewTestCluster(t)
+	os, osCleanup := sys.NewTestOS(t)
+
+	cleanup := func() {
+		nodeCleanup()
+		clusterCleanup()
+		osCleanup()
+	}
+
+	state := NewState(node, cluster, os)
+
+	return state, cleanup
+}
diff --git a/lxd/sys/testing.go b/lxd/sys/testing.go
new file mode 100644
index 000000000..b0bb8a42a
--- /dev/null
+++ b/lxd/sys/testing.go
@@ -0,0 +1,28 @@
+package sys
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// NewTestOS returns a new OS instance initialized with test values.
+func NewTestOS(t *testing.T) (*OS, func()) {
+	dir, err := ioutil.TempDir("", "lxd-sys-os-test-")
+	require.NoError(t, err)
+
+	cleanup := func() {
+		require.NoError(t, os.RemoveAll(dir))
+	}
+
+	os := &OS{
+		VarDir:   dir,
+		CacheDir: filepath.Join(dir, "cache"),
+		LogDir:   filepath.Join(dir, "log"),
+	}
+
+	return os, cleanup
+}

From 2d42e41d772ca77dbc003c527b4db6a2beea87b3 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 15 Sep 2017 07:24:43 +0000
Subject: [PATCH 013/116] Add db APIs to read and update the raft_nodes table

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/query/slices.go |  12 ++---
 lxd/db/raft.go         | 114 ++++++++++++++++++++++++++++++++++++++++++
 lxd/db/raft_test.go    | 133 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 253 insertions(+), 6 deletions(-)
 create mode 100644 lxd/db/raft.go
 create mode 100644 lxd/db/raft_test.go

diff --git a/lxd/db/query/slices.go b/lxd/db/query/slices.go
index 59d0cc892..6cd9a7934 100644
--- a/lxd/db/query/slices.go
+++ b/lxd/db/query/slices.go
@@ -8,7 +8,7 @@ import (
 
 // SelectStrings executes a statement which must yield rows with a single string
 // column. It returns the list of column values.
-func SelectStrings(tx *sql.Tx, query string) ([]string, error) {
+func SelectStrings(tx *sql.Tx, query string, args ...interface{}) ([]string, error) {
 	values := []string{}
 	scan := func(rows *sql.Rows) error {
 		var value string
@@ -20,7 +20,7 @@ func SelectStrings(tx *sql.Tx, query string) ([]string, error) {
 		return nil
 	}
 
-	err := scanSingleColumn(tx, query, "TEXT", scan)
+	err := scanSingleColumn(tx, query, args, "TEXT", scan)
 	if err != nil {
 		return nil, err
 	}
@@ -30,7 +30,7 @@ func SelectStrings(tx *sql.Tx, query string) ([]string, error) {
 
 // SelectIntegers executes a statement which must yield rows with a single integer
 // column. It returns the list of column values.
-func SelectIntegers(tx *sql.Tx, query string) ([]int, error) {
+func SelectIntegers(tx *sql.Tx, query string, args ...interface{}) ([]int, error) {
 	values := []int{}
 	scan := func(rows *sql.Rows) error {
 		var value int
@@ -42,7 +42,7 @@ func SelectIntegers(tx *sql.Tx, query string) ([]int, error) {
 		return nil
 	}
 
-	err := scanSingleColumn(tx, query, "INTEGER", scan)
+	err := scanSingleColumn(tx, query, args, "INTEGER", scan)
 	if err != nil {
 		return nil, err
 	}
@@ -76,8 +76,8 @@ func InsertStrings(tx *sql.Tx, stmt string, values []string) error {
 // Execute the given query and ensure that it yields rows with a single column
 // of the given database type. For every row yielded, execute the given
 // scanner.
-func scanSingleColumn(tx *sql.Tx, query string, typeName string, scan scanFunc) error {
-	rows, err := tx.Query(query)
+func scanSingleColumn(tx *sql.Tx, query string, args []interface{}, typeName string, scan scanFunc) error {
+	rows, err := tx.Query(query, args...)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/raft.go b/lxd/db/raft.go
new file mode 100644
index 000000000..40d6b29cb
--- /dev/null
+++ b/lxd/db/raft.go
@@ -0,0 +1,114 @@
+package db
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/pkg/errors"
+)
+
+// RaftNode holds information about a single node in the dqlite raft cluster.
+type RaftNode struct {
+	ID      int64  // Stable node identifier
+	Address string // Network address of the node
+}
+
+// RaftNodes returns information about all LXD nodes that are members of the
+// dqlite Raft cluster (possibly including the local node). If this LXD
+// instance is not running in clustered mode, an empty list is returned.
+func (n *NodeTx) RaftNodes() ([]RaftNode, error) {
+	nodes := []RaftNode{}
+	dest := func(i int) []interface{} {
+		nodes = append(nodes, RaftNode{})
+		return []interface{}{&nodes[i].ID, &nodes[i].Address}
+	}
+	err := query.SelectObjects(n.tx, dest, "SELECT id, address FROM raft_nodes ORDER BY id")
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fetch raft nodes")
+	}
+	return nodes, nil
+}
+
+// RaftNodeAddresses returns the addresses of all LXD nodes that are members of
+// the dqlite Raft cluster (possibly including the local node). If this LXD
+// instance is not running in clustered mode, an empty list is returned.
+func (n *NodeTx) RaftNodeAddresses() ([]string, error) {
+	return query.SelectStrings(n.tx, "SELECT address FROM raft_nodes")
+}
+
+// RaftNodeAddress returns the address of the LXD raft node with the given ID,
+// if any matching row exists.
+func (n *NodeTx) RaftNodeAddress(id int64) (string, error) {
+	stmt := "SELECT address FROM raft_nodes WHERE id=?"
+	addresses, err := query.SelectStrings(n.tx, stmt, id)
+	if err != nil {
+		return "", err
+	}
+	switch len(addresses) {
+	case 0:
+		return "", NoSuchObjectError
+	case 1:
+		return addresses[0], nil
+	default:
+		// This should never happen since we have a UNIQUE constraint
+		// on the raft_nodes.id column.
+		return "", fmt.Errorf("more than one match found")
+	}
+}
+
+// RaftNodeFirst adds the first node of the cluster. It ensures that the
+// database ID is 1, to match the server ID of the first raft log entry.
+//
+// This method is supposed to be called when there are no rows in raft_nodes,
+// and it will replace whatever existing row has ID 1.
+func (n *NodeTx) RaftNodeFirst(address string) error {
+	columns := []string{"id", "address"}
+	values := []interface{}{int64(1), address}
+	id, err := query.UpsertObject(n.tx, "raft_nodes", columns, values)
+	if err != nil {
+		return err
+	}
+	if id != 1 {
+		return fmt.Errorf("could not set raft node ID to 1")
+	}
+	return nil
+}
+
+// RaftNodeAdd adds a node to the current list of LXD nodes that are part of the
+// dqlite Raft cluster. It returns the ID of the newly inserted row.
+func (n *NodeTx) RaftNodeAdd(address string) (int64, error) {
+	columns := []string{"address"}
+	values := []interface{}{address}
+	return query.UpsertObject(n.tx, "raft_nodes", columns, values)
+}
+
+// RaftNodeDelete removes a node from the current list of LXD nodes that are
+// part of the dqlite Raft cluster.
+func (n *NodeTx) RaftNodeDelete(id int64) error {
+	deleted, err := query.DeleteObject(n.tx, "raft_nodes", id)
+	if err != nil {
+		return err
+	}
+	if !deleted {
+		return NoSuchObjectError
+	}
+	return nil
+}
+
+// RaftNodesReplace replaces the current list of raft nodes.
+func (n *NodeTx) RaftNodesReplace(nodes []RaftNode) error {
+	_, err := n.tx.Exec("DELETE FROM raft_nodes")
+	if err != nil {
+		return err
+	}
+
+	columns := []string{"id", "address"}
+	for _, node := range nodes {
+		values := []interface{}{node.ID, node.Address}
+		_, err := query.UpsertObject(n.tx, "raft_nodes", columns, values)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/lxd/db/raft_test.go b/lxd/db/raft_test.go
new file mode 100644
index 000000000..dd74b8237
--- /dev/null
+++ b/lxd/db/raft_test.go
@@ -0,0 +1,133 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Fetch all raft nodes.
+func TestRaftNodes(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	id1, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	id2, err := tx.RaftNodeAdd("5.6.7.8:666")
+	require.NoError(t, err)
+
+	nodes, err := tx.RaftNodes()
+	require.NoError(t, err)
+
+	assert.Equal(t, id1, nodes[0].ID)
+	assert.Equal(t, id2, nodes[1].ID)
+	assert.Equal(t, "1.2.3.4:666", nodes[0].Address)
+	assert.Equal(t, "5.6.7.8:666", nodes[1].Address)
+}
+
+// Fetch the addresses of all raft nodes.
+func TestRaftNodeAddresses(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	_, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	_, err = tx.RaftNodeAdd("5.6.7.8:666")
+	require.NoError(t, err)
+
+	addresses, err := tx.RaftNodeAddresses()
+	require.NoError(t, err)
+
+	assert.Equal(t, []string{"1.2.3.4:666", "5.6.7.8:666"}, addresses)
+}
+
+// Fetch the address of the raft node with the given ID.
+func TestRaftNodeAddress(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	_, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	id, err := tx.RaftNodeAdd("5.6.7.8:666")
+	require.NoError(t, err)
+
+	address, err := tx.RaftNodeAddress(id)
+	require.NoError(t, err)
+	assert.Equal(t, "5.6.7.8:666", address)
+}
+
+// Add the first raft node.
+func TestRaftNodeFirst(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	err := tx.RaftNodeFirst("1.2.3.4:666")
+	assert.NoError(t, err)
+
+	err = tx.RaftNodeDelete(1)
+	assert.NoError(t, err)
+
+	err = tx.RaftNodeFirst("5.6.7.8:666")
+	assert.NoError(t, err)
+
+	address, err := tx.RaftNodeAddress(1)
+	require.NoError(t, err)
+	assert.Equal(t, "5.6.7.8:666", address)
+}
+
+// Add a new raft node.
+func TestRaftNodeAdd(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	id, err := tx.RaftNodeAdd("1.2.3.4:666")
+	assert.Equal(t, int64(1), id)
+	assert.NoError(t, err)
+}
+
+// Delete an existing raft node.
+func TestRaftNodeDelete(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	id, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	err = tx.RaftNodeDelete(id)
+	assert.NoError(t, err)
+}
+
+// Delete a non-existing raft node returns an error.
+func TestRaftNodeDelete_NonExisting(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	err := tx.RaftNodeDelete(1)
+	assert.Equal(t, db.NoSuchObjectError, err)
+}
+
+// Replace all existing raft nodes.
+func TestRaftNodesReplace(t *testing.T) {
+	tx, cleanup := db.NewTestNodeTx(t)
+	defer cleanup()
+
+	_, err := tx.RaftNodeAdd("1.2.3.4:666")
+	require.NoError(t, err)
+
+	nodes := []db.RaftNode{
+		{ID: 2, Address: "2.2.2.2:666"},
+		{ID: 3, Address: "3.3.3.3:666"},
+	}
+	err = tx.RaftNodesReplace(nodes)
+	assert.NoError(t, err)
+
+	newNodes, err := tx.RaftNodes()
+	require.NoError(t, err)
+
+	assert.Equal(t, nodes, newNodes)
+}

From c1bab88ea953606844ffb602aa480f93c95076cb Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 11 Oct 2017 13:34:20 +0000
Subject: [PATCH 014/116] Add node.DetermineRole function to figure what role a
 node plays

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/node/raft.go      | 60 +++++++++++++++++++++++++++++++++++++++
 lxd/node/raft_test.go | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 137 insertions(+)
 create mode 100644 lxd/node/raft.go
 create mode 100644 lxd/node/raft_test.go

diff --git a/lxd/node/raft.go b/lxd/node/raft.go
new file mode 100644
index 000000000..8b4605356
--- /dev/null
+++ b/lxd/node/raft.go
@@ -0,0 +1,60 @@
+package node
+
+import "github.com/lxc/lxd/lxd/db"
+
+// DetermineRaftNode figures out what raft node ID and address we have, if any.
+//
+// This decision is based on the values of the core.https_address config key
+// and on the rows in the raft_nodes table, both stored in the node-level
+// SQLite database.
+//
+// The following rules are applied:
+//
+// - If no core.https_address config key is set, this is a non-clustered node
+//   and the returned RaftNode will have ID 1 but no address, to signal that
+//   the node should setup an in-memory raft cluster where the node itself
+//   is the only member and leader.
+//
+// - If core.https_address config key is set, but there is no row in the
+//   raft_nodes table, this is a non-clustered node as well, and same behavior
+//   as the previous case applies.
+//
+// - If core.https_address config key is set and there is at least one row in
+//   the raft_nodes table, then this node is considered a raft node if
+//   core.https_address matches one of the rows in raft_nodes. In that case,
+//   the matching db.RaftNode row is returned, otherwise nil.
+func DetermineRaftNode(tx *db.NodeTx) (*db.RaftNode, error) {
+	config, err := ConfigLoad(tx)
+	if err != nil {
+		return nil, err
+	}
+
+	address := config.HTTPSAddress()
+
+	// If core.https_address is the empty string, then this LXD instance is
+	// not running in clustering mode.
+	if address == "" {
+		return &db.RaftNode{ID: 1}, nil
+	}
+
+	nodes, err := tx.RaftNodes()
+	if err != nil {
+		return nil, err
+	}
+
+	// If core.https_address is set, but raft_nodes has no rows, this is
+	// still an instance not running in clustering mode.
+	if len(nodes) == 0 {
+		return &db.RaftNode{ID: 1}, nil
+	}
+
+	// If there is one or more row in raft_nodes, try to find a matching
+	// one.
+	for _, node := range nodes {
+		if node.Address == address {
+			return &node, nil
+		}
+	}
+
+	return nil, nil
+}
diff --git a/lxd/node/raft_test.go b/lxd/node/raft_test.go
new file mode 100644
index 000000000..b376bdc3f
--- /dev/null
+++ b/lxd/node/raft_test.go
@@ -0,0 +1,77 @@
+package node_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/mpvl/subtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// The raft identity (ID and address) of a node depends on the value of
+// core.https_address and the entries of the raft_nodes table.
+func TestDetermineRaftNode(t *testing.T) {
+	cases := []struct {
+		title     string
+		address   string       // Value of core.https_address
+		addresses []string     // Entries in raft_nodes
+		node      *db.RaftNode // Expected node value
+	}{
+		{
+			`no core.https_address set`,
+			"",
+			[]string{},
+			&db.RaftNode{ID: 1},
+		},
+		{
+			`core.https_address set and no raft_nodes rows`,
+			"1.2.3.4:8443",
+			[]string{},
+			&db.RaftNode{ID: 1},
+		},
+		{
+			`core.https_address set and matching the one and only raft_nodes row`,
+			"1.2.3.4:8443",
+			[]string{"1.2.3.4:8443"},
+			&db.RaftNode{ID: 1, Address: "1.2.3.4:8443"},
+		},
+		{
+			`core.https_address set and matching one of many raft_nodes rows`,
+			"5.6.7.8:999",
+			[]string{"1.2.3.4:666", "5.6.7.8:999"},
+			&db.RaftNode{ID: 2, Address: "5.6.7.8:999"},
+		},
+		{
+			`core.https_address set and no matching raft_nodes row`,
+			"1.2.3.4:666",
+			[]string{"5.6.7.8:999"},
+			nil,
+		},
+	}
+
+	for _, c := range cases {
+		subtest.Run(t, c.title, func(t *testing.T) {
+			tx, cleanup := db.NewTestNodeTx(t)
+			defer cleanup()
+
+			err := tx.UpdateConfig(map[string]string{"core.https_address": c.address})
+			require.NoError(t, err)
+
+			for _, address := range c.addresses {
+				_, err := tx.RaftNodeAdd(address)
+				require.NoError(t, err)
+			}
+
+			node, err := node.DetermineRaftNode(tx)
+			require.NoError(t, err)
+			if c.node == nil {
+				assert.Nil(t, node)
+			} else {
+				assert.Equal(t, c.node.ID, node.ID)
+				assert.Equal(t, c.node.Address, node.Address)
+			}
+		})
+	}
+}

From 20e8eba2ade2a7ae1a6d29bfaa9f597ed218f795 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 19 Aug 2017 20:58:27 +0000
Subject: [PATCH 015/116] Add sqlite submodule

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 .gitmodules                    |  4 ++++
 Makefile                       | 13 +++++++++++--
 lxd/.dir-locals.el             | 17 ++++++++++++++++-
 lxd/.go-rename-wrapper         |  7 +++++++
 lxd/.go-wrapper                |  7 +++++++
 lxd/sqlite                     |  1 +
 test/suites/static_analysis.sh |  9 ++++++++-
 7 files changed, 54 insertions(+), 4 deletions(-)
 create mode 100644 .gitmodules
 create mode 100755 lxd/.go-rename-wrapper
 create mode 100755 lxd/.go-wrapper
 create mode 160000 lxd/sqlite

diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..06ca26ad9
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,4 @@
+[submodule "lxd/sqlite"]
+	path = lxd/sqlite
+	url = https://github.com/CanonicalLtd/sqlite.git
+	ignore = dirty
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 891088074..3ad71816f 100644
--- a/Makefile
+++ b/Makefile
@@ -3,6 +3,7 @@ POFILES=$(wildcard po/*.po)
 MOFILES=$(patsubst %.po,%.mo,$(POFILES))
 LINGUAS=$(basename $(POFILES))
 POTFILE=po/$(DOMAIN).pot
+GO_SERVER=./lxd/.go-wrapper
 
 # dist is primarily for use when packaging; for development we still manage
 # dependencies via `go get` explicitly.
@@ -13,8 +14,8 @@ TAGS=$(shell printf "\#include <sqlite3.h>\nvoid main(){}" | $(CC) -o /dev/null
 
 .PHONY: default
 default:
-	go get -t -v -d ./...
-	go install -v $(TAGS) $(DEBUG) ./...
+	$(GO_SERVER) get -t -v -d ./...
+	$(GO_SERVER) install -v $(TAGS) $(DEBUG) ./...
 	@echo "LXD built successfully"
 
 .PHONY: client
@@ -105,6 +106,14 @@ update-pot:
 
 build-mo: $(MOFILES)
 
+.PHONY: build-sqlite
+build-sqlite:
+	cd lxd/sqlite && \
+	    git log -1 --format=format:%ci%n | sed -e 's/ [-+].*//;s/ /T/;s/^/D /' > manifest && \
+	    git log -1 --format=format:%H > manifest.uuid && \
+	    ./configure && \
+	    make
+
 static-analysis:
 	(cd test;  /bin/sh -x -c ". suites/static_analysis.sh; test_static_analysis")
 
diff --git a/lxd/.dir-locals.el b/lxd/.dir-locals.el
index 9bebcc48c..315bd893b 100644
--- a/lxd/.dir-locals.el
+++ b/lxd/.dir-locals.el
@@ -1,3 +1,18 @@
 ;;; Directory Local Variables
 ;;; For more information see (info "(emacs) Directory Variables")
-((go-mode . ((go-test-args . "-tags libsqlite3"))))
+((go-mode
+  . ((go-test-args . "-tags libsqlite3 -timeout 10s")
+     (eval
+      . (set
+	 (make-local-variable 'flycheck-go-build-tags)
+	 '("libsqlite3")))
+     (eval
+      . (let* ((locals-path
+     		(let ((d (dir-locals-find-file ".")))
+     		  (if (stringp d) (file-name-directory d) (car d))))
+	       (go-wrapper (s-concat locals-path ".go-wrapper"))
+	       (go-rename-wrapper (s-concat locals-path ".go-rename-wrapper")))
+     	  (progn
+	    (set (make-local-variable 'go-command) go-wrapper)
+	    (set (make-local-variable 'flycheck-go-build-executable) go-wrapper)
+	    (set (make-local-variable 'go-rename-command) go-rename-wrapper)))))))
diff --git a/lxd/.go-rename-wrapper b/lxd/.go-rename-wrapper
new file mode 100755
index 000000000..1ad3ceefa
--- /dev/null
+++ b/lxd/.go-rename-wrapper
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+export CGO_CFLAGS="-I$(realpath $(dirname $0))/sqlite/"
+export CGO_LDFLAGS="-L$(realpath $(dirname $0))/sqlite/.libs"
+export LD_LIBRARY_PATH="$(realpath $(dirname $0))/sqlite/.libs"
+
+gorename $@
diff --git a/lxd/.go-wrapper b/lxd/.go-wrapper
new file mode 100755
index 000000000..9fd28b735
--- /dev/null
+++ b/lxd/.go-wrapper
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+export CGO_CFLAGS="-I$(realpath $(dirname $0))/sqlite/"
+export CGO_LDFLAGS="-L$(realpath $(dirname $0))/sqlite/.libs"
+export LD_LIBRARY_PATH="$(realpath $(dirname $0))/sqlite/.libs"
+
+go $@
diff --git a/lxd/sqlite b/lxd/sqlite
new file mode 160000
index 000000000..235392610
--- /dev/null
+++ b/lxd/sqlite
@@ -0,0 +1 @@
+Subproject commit 235392610287d85dda11a6eee4d6e34d7cc6ef3f
diff --git a/test/suites/static_analysis.sh b/test/suites/static_analysis.sh
index b0d8672ce..d834d1495 100644
--- a/test/suites/static_analysis.sh
+++ b/test/suites/static_analysis.sh
@@ -22,8 +22,15 @@ test_static_analysis() {
     fi
 
     # Go static analysis
+    CGO_CFLAGS="-I$(pwd)/lxd/sqlite/"
+    CGO_LDFLAGS="-L$(pwd)/lxd/sqlite/.libs"
+    LD_LIBRARY_PATH="$(pwd)/lxd/sqlite/.libs"
+    export CGO_CFLAGS
+    export CGO_LDFLAGS
+    export LD_LIBRARY_PATH
+
     ## Functions starting by empty line
-    OUT=$(grep -r "^$" -B1 . | grep "func " | grep -v "}$" || true)
+    OUT=$(grep -r "^$" -B1 . | grep "func " | grep -v "}$" | grep -v "./lxd/sqlite/" || true)
     if [ -n "${OUT}" ]; then
       echo "ERROR: Functions must not start with an empty line: ${OUT}"
       false

From 7a05842dbd066ca330b0da23152c2ad0b241728b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 17:47:30 +0000
Subject: [PATCH 016/116] Add cluster.newRaft APIs to bring up a LXD-specific
 raft instance

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go             |   2 +-
 lxd/cluster/gateway.go          |   1 +
 lxd/cluster/raft.go             | 420 ++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/raft_export_test.go |  19 ++
 lxd/cluster/raft_test.go        | 143 ++++++++++++++
 lxd/cluster/tls.go              |  35 ++++
 lxd/daemon.go                   |  75 ++++---
 lxd/endpoints/network.go        |  17 +-
 lxd/main_test.go                |   9 +-
 lxd/sys/fs.go                   |   1 +
 lxd/util/net.go                 |  24 +++
 11 files changed, 696 insertions(+), 50 deletions(-)
 create mode 100644 lxd/cluster/raft.go
 create mode 100644 lxd/cluster/raft_export_test.go
 create mode 100644 lxd/cluster/raft_test.go
 create mode 100644 lxd/cluster/tls.go

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 4a253db7f..b5a568d8d 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -41,7 +41,7 @@ func internalWaitReady(d *Daemon, r *http.Request) Response {
 }
 
 func internalShutdown(d *Daemon, r *http.Request) Response {
-	d.shutdownChan <- true
+	d.shutdownChan <- struct{}{}
 
 	return EmptySyncResponse
 }
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 41aee225b..10a560aca 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -93,6 +93,7 @@ func (g *Gateway) init() error {
 func grpcMemoryDial(dial func() net.Conn) func() (*grpc.ClientConn, error) {
 	options := []grpc.DialOption{
 		grpc.WithInsecure(),
+		grpc.WithBlock(),
 		grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
 			return dial(), nil
 		}),
diff --git a/lxd/cluster/raft.go b/lxd/cluster/raft.go
new file mode 100644
index 000000000..0b24ff8b9
--- /dev/null
+++ b/lxd/cluster/raft.go
@@ -0,0 +1,420 @@
+package cluster
+
+import (
+	"bytes"
+	"crypto/x509"
+	"fmt"
+	"log"
+	"math"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/CanonicalLtd/dqlite"
+	"github.com/CanonicalLtd/raft-http"
+	"github.com/CanonicalLtd/raft-membership"
+	"github.com/boltdb/bolt"
+	"github.com/hashicorp/raft"
+	"github.com/hashicorp/raft-boltdb"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/pkg/errors"
+	log15 "gopkg.in/inconshreveable/log15.v2"
+)
+
+// Create a raft instance and all its dependencies, to be used as backend for
+// the dqlite driver running on this LXD node.
+//
+// If this node should not serve as dqlite node, nil is returned.
+//
+// The raft instance will use an in-memory transport if clustering is not
+// enabled on this node.
+//
+// The certInfo parameter should contain the cluster TLS keypair and optional
+// CA certificate.
+//
+// The latency parameter is a coarse grain measure of how fast/reliable network
+// links are. This is used to tweak the various timeouts parameters of the raft
+// algorithm. See the raft.Config structure for more details. A value of 1.0
+// means use the default values from hashicorp's raft package. Values closer to
+// 0 reduce the values of the various timeouts (useful when running unit tests
+// in-memory).
+func newRaft(database *db.Node, cert *shared.CertInfo, latency float64) (*raftInstance, error) {
+	if latency <= 0 {
+		return nil, fmt.Errorf("latency should be positive")
+	}
+
+	// Figure out if we actually need to act as dqlite node.
+	var info *db.RaftNode
+	err := database.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		info, err = node.DetermineRaftNode(tx)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// If we're not part of the dqlite cluster, there's nothing to do.
+	if info == nil {
+		return nil, nil
+	}
+	logger.Info("Start database node", log15.Ctx{"id": info.ID, "address": info.Address})
+
+	// Initialize a raft instance along with all needed dependencies.
+	instance, err := raftInstanceInit(database, info, cert, latency)
+	if err != nil {
+		return nil, err
+	}
+
+	return instance, nil
+}
+
+// A LXD-specific wrapper around raft.Raft, which also holds a reference to its
+// network transport and dqlite FSM.
+type raftInstance struct {
+	layer             *rafthttp.Layer       // HTTP-based raft transport layer
+	handler           http.HandlerFunc      // Handles join/leave/connect requests
+	membershipChanger func(*raft.Raft)      // Forwards to raft membership requests from handler
+	logs              *raftboltdb.BoltStore // Raft logs store, needs to be closed upon shutdown
+	fsm               raft.FSM              // The dqlite FSM linked to the raft instance
+	raft              *raft.Raft            // The actual raft instance
+}
+
+// Create a new raftFactory, instantiating all needed raft dependencies.
+func raftInstanceInit(
+	db *db.Node, node *db.RaftNode, cert *shared.CertInfo, latency float64) (*raftInstance, error) {
+	// FIXME: should be a parameter
+	timeout := 5 * time.Second
+
+	logger := raftLogger()
+
+	// Raft config.
+	config := raftConfig(latency)
+	config.Logger = logger
+	config.LocalID = raft.ServerID(strconv.Itoa(int(node.ID)))
+
+	// Raft transport
+	var handler *rafthttp.Handler
+	var membershipChanger func(*raft.Raft)
+	var layer *rafthttp.Layer
+	var transport raft.Transport
+	addr := node.Address
+	if addr == "" {
+		// This should normally be used only for testing as it can
+		// cause split-brain, but since we are not exposing raft to the
+		// network at all it's safe to do so. When this node gets
+		// exposed to the network and assigned an address, we need to
+		// restart raft anyways.
+		config.StartAsLeader = true
+		transport = raftMemoryTransport()
+	} else {
+		dial, err := raftDial(cert)
+		if err != nil {
+			return nil, err
+		}
+
+		transport, handler, layer, err = raftNetworkTransport(db, addr, logger, timeout, dial)
+		if err != nil {
+			return nil, err
+		}
+		membershipChanger = func(raft *raft.Raft) {
+			raftmembership.HandleChangeRequests(raft, handler.Requests())
+		}
+	}
+
+	err := raft.ValidateConfig(config)
+	if err != nil {
+		return nil, errors.Wrap(err, "invalid raft configuration")
+	}
+
+	// Data directory
+	dir := filepath.Join(db.Dir(), "raft")
+	if !shared.PathExists(dir) {
+		err := os.Mkdir(dir, 0750)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Raft logs store
+	logs, err := raftboltdb.New(raftboltdb.Options{
+		Path:        filepath.Join(dir, "logs.db"),
+		BoltOptions: &bolt.Options{Timeout: timeout},
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create bolt store for raft logs")
+	}
+
+	// Raft snapshot store
+	snaps, err := raft.NewFileSnapshotStoreWithLogger(dir, 2, logger)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create file snapshot store")
+	}
+
+	// If we are the initial node, we use the last index persisted in the
+	// logs store and other checks to determine if we have ever
+	// bootstrapped the cluster, and if not we do so (see raft.HasExistingState).
+	if node.ID == 1 {
+		err := raftMaybeBootstrap(config, logs, snaps, transport)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to bootstrap cluster")
+		}
+	}
+
+	// The dqlite FSM.
+	fsm := dqlite.NewFSM(dir)
+
+	// The actual raft instance.
+	raft, err := raft.NewRaft(config, fsm, logs, logs, snaps, transport)
+	if err != nil {
+		logs.Close()
+		return nil, errors.Wrap(err, "failed to start raft")
+	}
+
+	if membershipChanger != nil {
+		// Process Raft connections over HTTP. This goroutine will
+		// terminate when instance.handler.Close() is called, which
+		// happens indirectly when the raft instance is shutdown in
+		// instance.Shutdown(), and the associated transport is closed.
+		go membershipChanger(raft)
+	}
+
+	instance := &raftInstance{
+		layer:             layer,
+		handler:           raftHandler(cert, handler),
+		membershipChanger: membershipChanger,
+		logs:              logs,
+		fsm:               fsm,
+		raft:              raft,
+	}
+
+	return instance, nil
+}
+
+// FSM returns the dqlite FSM associated with the raft instance.
+func (i *raftInstance) FSM() raft.FSM {
+	return i.fsm
+}
+
+// Raft returns the actual underlying raft instance.
+func (i *raftInstance) Raft() *raft.Raft {
+	return i.raft
+}
+
+// HandlerFunc can be used to handle HTTP requests performed against the LXD
+// API RaftEndpoint ("/internal/raft"), in order to join/leave/form the raft
+// cluster.
+//
+// If it returns nil, it means that this node is not supposed to expose a raft
+// endpoint over the network, because it's running as a non-clustered single
+// node.
+func (i *raftInstance) HandlerFunc() http.HandlerFunc {
+	if i.handler == nil {
+		return nil
+	}
+	return i.handler.ServeHTTP
+}
+
+// MembershipChanger returns the underlying rafthttp.Layer, which can be used
+// to change the membership of this node in the cluster.
+func (i *raftInstance) MembershipChanger() raftmembership.Changer {
+	return i.layer
+}
+
+// Shutdown raft and any raft-related resource we have instantiated.
+func (i *raftInstance) Shutdown() error {
+	logger.Info("Stop database node")
+	err := i.raft.Shutdown().Error()
+	if err != nil {
+		return errors.Wrap(err, "failed to shutdown raft")
+	}
+	err = i.logs.Close()
+	if err != nil {
+		return errors.Wrap(err, "failed to close boltdb logs store")
+	}
+	return nil
+}
+
+// Create an in-memory raft transport.
+func raftMemoryTransport() raft.Transport {
+	_, transport := raft.NewInmemTransport("0")
+	return transport
+}
+
+// Create a rafthttp.Dial function that connects over TLS using the given
+// cluster (and optionally CA) certificate both as client and remote
+// certificate.
+func raftDial(cert *shared.CertInfo) (rafthttp.Dial, error) {
+	config, err := tlsClientConfig(cert)
+	if err != nil {
+		return nil, err
+	}
+	dial := rafthttp.NewDialTLS(config)
+	return dial, nil
+}
+
+// Create a network raft transport that will handle connections using a
+// rafthttp.Handler.
+func raftNetworkTransport(
+	db *db.Node,
+	address string,
+	logger *log.Logger,
+	timeout time.Duration,
+	dial rafthttp.Dial) (raft.Transport, *rafthttp.Handler, *rafthttp.Layer, error) {
+	handler := rafthttp.NewHandler()
+	addr, err := net.ResolveTCPAddr("tcp", address)
+	if err != nil {
+		return nil, nil, nil, errors.Wrap(err, "invalid node address")
+	}
+
+	layer := rafthttp.NewLayer(raftEndpoint, addr, handler, dial)
+	config := &raft.NetworkTransportConfig{
+		Logger:                logger,
+		Stream:                layer,
+		MaxPool:               2,
+		Timeout:               timeout,
+		ServerAddressProvider: &raftAddressProvider{db: db},
+	}
+	transport := raft.NewNetworkTransportWithConfig(config)
+
+	return transport, handler, layer, nil
+}
+
+// The LXD API endpoint path that gets routed to a rafthttp.Handler for
+// joining/leaving the cluster and exchanging raft commands between nodes.
+const raftEndpoint = "/internal/raft"
+
+// An address provider that looks up server addresses in the raft_nodes table.
+type raftAddressProvider struct {
+	db *db.Node
+}
+
+func (p *raftAddressProvider) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) {
+	databaseID, err := strconv.Atoi(string(id))
+	if err != nil {
+		return "", errors.Wrap(err, "non-numeric server ID")
+	}
+	var address string
+	err = p.db.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		address, err = tx.RaftNodeAddress(int64(databaseID))
+		return err
+	})
+	if err != nil {
+		return "", err
+	}
+	return raft.ServerAddress(address), nil
+}
+
+// Create a base raft configuration tweaked for a network with the given latency measure.
+func raftConfig(latency float64) *raft.Config {
+	config := raft.DefaultConfig()
+	scale := func(duration *time.Duration) {
+		*duration = time.Duration((math.Ceil(float64(*duration) * latency)))
+	}
+	durations := []*time.Duration{
+		&config.HeartbeatTimeout,
+		&config.ElectionTimeout,
+		&config.CommitTimeout,
+		&config.LeaderLeaseTimeout,
+	}
+	for _, duration := range durations {
+		scale(duration)
+	}
+	return config
+}
+
+// Helper to bootstrap the raft cluster if needed.
+func raftMaybeBootstrap(
+	conf *raft.Config,
+	logs *raftboltdb.BoltStore,
+	snaps raft.SnapshotStore,
+	trans raft.Transport) error {
+	// First check if we were already bootstrapped.
+	hasExistingState, err := raft.HasExistingState(logs, logs, snaps)
+	if err != nil {
+		return errors.Wrap(err, "failed to check if raft has existing state")
+	}
+	if hasExistingState {
+		return nil
+	}
+	server := raft.Server{
+		ID:      conf.LocalID,
+		Address: trans.LocalAddr(),
+	}
+	configuration := raft.Configuration{
+		Servers: []raft.Server{server},
+	}
+	return raft.BootstrapCluster(conf, logs, logs, snaps, trans, configuration)
+}
+
+func raftHandler(info *shared.CertInfo, handler *rafthttp.Handler) http.HandlerFunc {
+	if handler == nil {
+		return nil
+	}
+	cert, err := x509.ParseCertificate(info.KeyPair().Certificate[0])
+	if err != nil {
+		// Since we have already loaded this certificate, typically
+		// using LoadX509KeyPair, an error should never happen, but
+		// check for good measure.
+		panic(fmt.Sprintf("invalid keypair material: %v", err))
+	}
+	trustedCerts := []x509.Certificate{*cert}
+	return func(w http.ResponseWriter, r *http.Request) {
+		if r.TLS == nil || len(r.TLS.PeerCertificates) == 0 || !util.CheckTrustState(*r.TLS.PeerCertificates[0], trustedCerts) {
+			http.Error(w, "403 invalid client certificate", http.StatusForbidden)
+			return
+		}
+		handler.ServeHTTP(w, r)
+	}
+}
+
+func raftLogger() *log.Logger {
+	return log.New(&raftLogWriter{}, "", 0)
+}
+
+// Implement io.Writer on top of LXD's logging system.
+type raftLogWriter struct {
+}
+
+func (o *raftLogWriter) Write(line []byte) (n int, err error) {
+	// Parse the log level according to hashicorp's raft pkg conventions.
+	level := ""
+	msg := ""
+	x := bytes.IndexByte(line, '[')
+	if x >= 0 {
+		y := bytes.IndexByte(line[x:], ']')
+		if y >= 0 {
+			level = string(line[x+1 : x+y])
+
+			// Capitalize the string, to match LXD logging conventions
+			first := strings.ToUpper(string(line[x+y+2]))
+			rest := string(line[x+y+3 : len(line)-1])
+			msg = first + rest
+		}
+	}
+
+	if level == "" {
+		// Ignore log entries that don't stick to the convention.
+		return len(line), nil
+	}
+
+	switch level {
+	case "DEBUG":
+		logger.Debug(msg)
+	case "INFO":
+		logger.Info(msg)
+	case "WARN":
+		logger.Warn(msg)
+	default:
+		// Ignore any other log level.
+	}
+	return len(line), nil
+}
diff --git a/lxd/cluster/raft_export_test.go b/lxd/cluster/raft_export_test.go
new file mode 100644
index 000000000..e4b7c6dc6
--- /dev/null
+++ b/lxd/cluster/raft_export_test.go
@@ -0,0 +1,19 @@
+package cluster
+
+import (
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/shared"
+)
+
+// Export raft-related APIs for black box unit testing.
+func NewRaft(db *db.Node, cert *shared.CertInfo, latency float64) (*RaftInstance, error) {
+	instance, err := newRaft(db, cert, latency)
+	if err != nil {
+		return nil, err
+	}
+	if instance == nil {
+		// newRaft returns (nil, nil) on non-clustered nodes;
+		// dereferencing it here would panic.
+		return nil, nil
+	}
+	return &RaftInstance{*instance}, nil
+}
+
+type RaftInstance struct {
+	raftInstance
+}
diff --git a/lxd/cluster/raft_test.go b/lxd/cluster/raft_test.go
new file mode 100644
index 000000000..9e6cfb983
--- /dev/null
+++ b/lxd/cluster/raft_test.go
@@ -0,0 +1,143 @@
+package cluster_test
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/CanonicalLtd/raft-test"
+	"github.com/hashicorp/raft"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logging"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// By default a node starts in single mode.
+func TestRaftFactory_Single(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	instance := newRaft(t, db, cert)
+	defer instance.Shutdown()
+
+	rafttest.WaitLeader(t, instance.Raft(), time.Second)
+	assert.Equal(t, raft.Leader, instance.Raft().State())
+}
+
+// If there's a network address configured, but we are the only raft node,
+// the factory starts raft in single mode.
+func TestRaftFactory_SingleWithNetworkAddress(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	setRaftRole(t, db, "1.2.3.4:666")
+
+	instance := newRaft(t, db, cert)
+	defer instance.Shutdown()
+
+	rafttest.WaitLeader(t, instance.Raft(), time.Second)
+	assert.Equal(t, raft.Leader, instance.Raft().State())
+}
+
+// When the factory is started the first time on a non-clustered node, it will
+// use the memory transport and the raft node will not have a real network
+// address. The in-memory address gets saved in the first log committed in the
+// store as the address of the server with ID "1". If the LXD instance is then
+// reconfigured to enable clustering, we now use a real network transport and
+// setup a ServerAddressProvider that will override the initial in-memory
+// address of node "1" with its real network address, as configured in the
+// raft_nodes table.
+func TestRaftFactory_TransitionToClusteredMode(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	instance := newRaft(t, db, cert)
+	instance.Shutdown()
+
+	setRaftRole(t, db, "1.2.3.4:666")
+
+	instance = newRaft(t, db, cert)
+	defer instance.Shutdown()
+
+	rafttest.WaitLeader(t, instance.Raft(), time.Second)
+	assert.Equal(t, raft.Leader, instance.Raft().State())
+}
+
+// If there is more than one node, the raft object is created with
+// cluster-compatible parameters.
+func TestRaftFactory_MultiNode(t *testing.T) {
+	cert := shared.TestingKeyPair()
+
+	leader := ""
+	for i := 0; i < 2; i++ {
+		db, cleanup := db.NewTestNode(t)
+		defer cleanup()
+
+		mux := http.NewServeMux()
+		server := newServer(cert, mux)
+		defer server.Close()
+
+		address := server.Listener.Addr().String()
+		setRaftRole(t, db, address)
+
+		instance := newRaft(t, db, cert)
+		defer instance.Shutdown()
+		if i == 0 {
+			leader = address
+			rafttest.WaitLeader(t, instance.Raft(), time.Second)
+		}
+
+		mux.HandleFunc("/internal/raft", instance.HandlerFunc())
+
+		if i > 0 {
+			id := raft.ServerID(strconv.Itoa(i + 1))
+			target := raft.ServerAddress(leader)
+			err := instance.MembershipChanger().Join(id, target, 5*time.Second)
+			require.NoError(t, err)
+		}
+	}
+}
+
+// Create a new test RaftInstance.
+func newRaft(t *testing.T, db *db.Node, cert *shared.CertInfo) *cluster.RaftInstance {
+	logging.Testing(t)
+	instance, err := cluster.NewRaft(db, cert, 0.2)
+	require.NoError(t, err)
+	return instance
+}
+
+// Set the core.https_address config key to the given address, and insert the
+// address into the raft_nodes table.
+//
+// This effectively makes the node act as a database raft node.
+func setRaftRole(t *testing.T, database *db.Node, address string) {
+	require.NoError(t, database.Transaction(func(tx *db.NodeTx) error {
+		err := tx.UpdateConfig(map[string]string{"core.https_address": address})
+		if err != nil {
+			return err
+		}
+		_, err = tx.RaftNodeAdd(address)
+		return err
+	}))
+}
+
+// Create a new test HTTP server configured with the given TLS certificate and
+// using the given handler.
+func newServer(cert *shared.CertInfo, handler http.Handler) *httptest.Server {
+	server := httptest.NewUnstartedServer(handler)
+	server.TLS = util.ServerTLSConfig(cert)
+	server.StartTLS()
+	return server
+}
diff --git a/lxd/cluster/tls.go b/lxd/cluster/tls.go
new file mode 100644
index 000000000..aa9b75731
--- /dev/null
+++ b/lxd/cluster/tls.go
@@ -0,0 +1,35 @@
+package cluster
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+
+	"github.com/lxc/lxd/shared"
+)
+
+// Return a TLS configuration suitable for establishing inter-node network
+// connections using the cluster certificate.
+func tlsClientConfig(info *shared.CertInfo) (*tls.Config, error) {
+	keypair := info.KeyPair()
+	ca := info.CA()
+	config := shared.InitTLSConfig()
+	config.Certificates = []tls.Certificate{keypair}
+	config.RootCAs = x509.NewCertPool()
+	if ca != nil {
+		config.RootCAs.AddCert(ca)
+	}
+	// Since the same cluster keypair is used both as server and as client
+	// cert, let's add it to the CA pool to make it trusted.
+	cert, err := x509.ParseCertificate(keypair.Certificate[0])
+	if err != nil {
+		return nil, err
+	}
+	cert.IsCA = true
+	cert.KeyUsage = x509.KeyUsageCertSign
+	config.RootCAs.AddCert(cert)
+
+	if cert.DNSNames != nil {
+		config.ServerName = cert.DNSNames[0]
+	}
+	return config, nil
+}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 170192abd..a122a62d0 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -46,8 +46,9 @@ type Daemon struct {
 	os           *sys.OS
 	db           *db.Node
 	cluster      *db.Cluster
-	readyChan    chan bool
-	shutdownChan chan bool
+	setupChan    chan struct{} // Closed when basic Daemon setup is completed
+	readyChan    chan struct{} // Closed when LXD is fully ready
+	shutdownChan chan struct{}
 
 	// Tasks registry for long-running background tasks.
 	tasks task.Group
@@ -80,8 +81,11 @@ type DaemonConfig struct {
 // NewDaemon returns a new Daemon object with the given configuration.
 func NewDaemon(config *DaemonConfig, os *sys.OS) *Daemon {
 	return &Daemon{
-		config: config,
-		os:     os,
+		config:       config,
+		os:           os,
+		setupChan:    make(chan struct{}),
+		readyChan:    make(chan struct{}),
+		shutdownChan: make(chan struct{}),
 	}
 }
 
@@ -204,6 +208,10 @@ func (d *Daemon) createCmd(restAPI *mux.Router, version string, c Command) {
 	restAPI.HandleFunc(uri, func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", "application/json")
 
+		// Block public API requests until we're done with basic
+		// initialization tasks, such as setting up the cluster database.
+		<-d.setupChan
+
 		untrustedOk := (r.Method == "GET" && c.untrustedGet) || (r.Method == "POST" && c.untrustedPost)
 		err := d.checkTrustedClient(r)
 		if err == nil {
@@ -337,10 +345,6 @@ func (d *Daemon) Init() error {
 }
 
 func (d *Daemon) init() error {
-	/* Initialize some variables */
-	d.readyChan = make(chan bool)
-	d.shutdownChan = make(chan bool)
-
 	/* Set the LVM environment */
 	err := os.Setenv("LVM_SUPPRESS_FD_WARNINGS", "1")
 	if err != nil {
@@ -407,6 +411,20 @@ func (d *Daemon) init() error {
 		return errors.Wrap(err, "failed to open cluster database")
 	}
 
+	/* Setup the web server */
+	config := &endpoints.Config{
+		Dir:                  d.os.VarDir,
+		Cert:                 certInfo,
+		RestServer:           RestServer(d),
+		DevLxdServer:         DevLxdServer(d),
+		LocalUnixSocketGroup: d.config.Group,
+		NetworkAddress:       address,
+	}
+	d.endpoints, err = endpoints.Up(config)
+	if err != nil {
+		return err
+	}
+
 	/* Read the storage pools */
 	err = SetupStorageDriver(d.State(), false)
 	if err != nil {
@@ -452,19 +470,7 @@ func (d *Daemon) init() error {
 		return err
 	}
 
-	/* Setup the web server */
-	config := &endpoints.Config{
-		Dir:                  d.os.VarDir,
-		Cert:                 certInfo,
-		RestServer:           RestServer(d),
-		DevLxdServer:         DevLxdServer(d),
-		LocalUnixSocketGroup: d.config.Group,
-		NetworkAddress:       address,
-	}
-	d.endpoints, err = endpoints.Up(config)
-	if err != nil {
-		return fmt.Errorf("cannot start API endpoints: %v", err)
-	}
+	close(d.setupChan)
 
 	// Run the post initialization actions
 	err = d.Ready()
@@ -544,17 +550,10 @@ func (d *Daemon) Stop() error {
 
 	trackError(d.tasks.Stop(time.Second)) // Give tasks at most a second to cleanup.
 
+	shouldUnmount := false
 	if d.db != nil {
 		if n, err := d.numRunningContainers(); err != nil || n == 0 {
-			logger.Infof("Unmounting temporary filesystems")
-
-			syscall.Unmount(shared.VarPath("devlxd"), syscall.MNT_DETACH)
-			syscall.Unmount(shared.VarPath("shmounts"), syscall.MNT_DETACH)
-
-			logger.Infof("Done unmounting temporary filesystems")
-		} else {
-			logger.Debugf(
-				"Not unmounting temporary filesystems (containers are still running)")
+			shouldUnmount = true
 		}
 
 		logger.Infof("Closing the database")
@@ -570,6 +569,22 @@ func (d *Daemon) Stop() error {
 		trackError(d.endpoints.Down())
 	}
 
+	if d.endpoints != nil {
+		trackError(d.endpoints.Down())
+	}
+
+	if shouldUnmount {
+		logger.Infof("Unmounting temporary filesystems")
+
+		syscall.Unmount(shared.VarPath("devlxd"), syscall.MNT_DETACH)
+		syscall.Unmount(shared.VarPath("shmounts"), syscall.MNT_DETACH)
+
+		logger.Infof("Done unmounting temporary filesystems")
+	} else {
+		logger.Debugf(
+			"Not unmounting temporary filesystems (containers are still running)")
+	}
+
 	logger.Infof("Saving simplestreams cache")
 	trackError(imageSaveStreamCache(d.os))
 	logger.Infof("Saved simplestreams cache")
diff --git a/lxd/endpoints/network.go b/lxd/endpoints/network.go
index 01c169b0f..5da1bc573 100644
--- a/lxd/endpoints/network.go
+++ b/lxd/endpoints/network.go
@@ -2,7 +2,6 @@ package endpoints
 
 import (
 	"crypto/tls"
-	"crypto/x509"
 	"fmt"
 	"net"
 	"sync"
@@ -137,22 +136,10 @@ func (l *networkListener) Accept() (net.Conn, error) {
 
 // Config safely swaps the underlying TLS configuration.
 func (l *networkListener) Config(cert *shared.CertInfo) {
-	config := shared.InitTLSConfig()
-	config.ClientAuth = tls.RequestClientCert
-	config.Certificates = []tls.Certificate{cert.KeyPair()}
-
-	if cert.CA() != nil {
-		pool := x509.NewCertPool()
-		pool.AddCert(cert.CA())
-		config.RootCAs = pool
-		config.ClientCAs = pool
-
-		logger.Infof("LXD is in CA mode, only CA-signed certificates will be allowed")
-	}
-
-	config.BuildNameToCertificate()
+	config := util.ServerTLSConfig(cert)
 
 	l.mu.Lock()
 	defer l.mu.Unlock()
+
 	l.config = config
 }
diff --git a/lxd/main_test.go b/lxd/main_test.go
index 0f05ac700..5555e199e 100644
--- a/lxd/main_test.go
+++ b/lxd/main_test.go
@@ -62,8 +62,6 @@ func (suite *lxdTestSuite) SetupTest() {
 		suite.T().Fatalf("failed to start daemon: %v", err)
 	}
 
-	daemonConfigInit(suite.d.db.DB())
-
 	// Create default storage pool. Make sure that we don't pass a nil to
 	// the next function.
 	poolConfig := map[string]string{}
@@ -107,8 +105,11 @@ func (suite *lxdTestSuite) SetupTest() {
 }
 
 func (suite *lxdTestSuite) TearDownTest() {
-	suite.d.Stop()
-	err := os.RemoveAll(suite.tmpdir)
+	err := suite.d.Stop()
+	if err != nil {
+		suite.T().Fatalf("failed to stop daemon: %v", err)
+	}
+	err = os.RemoveAll(suite.tmpdir)
 	if err != nil {
 		suite.T().Fatalf("failed to remove temp dir: %v", err)
 	}
diff --git a/lxd/sys/fs.go b/lxd/sys/fs.go
index d3eff1edf..c8550fc3a 100644
--- a/lxd/sys/fs.go
+++ b/lxd/sys/fs.go
@@ -13,6 +13,7 @@ func (s *OS) initDirs() error {
 	}{
 		{s.VarDir, 0711},
 		{s.CacheDir, 0700},
+		{filepath.Join(s.VarDir, "raft"), 0700},
 		{filepath.Join(s.VarDir, "containers"), 0711},
 		{filepath.Join(s.VarDir, "devices"), 0711},
 		{filepath.Join(s.VarDir, "devlxd"), 0755},
diff --git a/lxd/util/net.go b/lxd/util/net.go
index 38d6acb8b..ae5913652 100644
--- a/lxd/util/net.go
+++ b/lxd/util/net.go
@@ -1,10 +1,13 @@
 package util
 
 import (
+	"crypto/tls"
+	"crypto/x509"
 	"fmt"
 	"net"
 
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
 )
 
 // InMemoryNetwork creates a fully in-memory listener and dial function.
@@ -69,3 +72,24 @@ func CanonicalNetworkAddress(address string) string {
 	}
 	return address
 }
+
+// ServerTLSConfig returns a new server-side tls.Config generated from the given
+// certificate info.
+func ServerTLSConfig(cert *shared.CertInfo) *tls.Config {
+	config := shared.InitTLSConfig()
+	config.ClientAuth = tls.RequestClientCert
+	config.Certificates = []tls.Certificate{cert.KeyPair()}
+	config.NextProtos = []string{"h2"} // Required by gRPC
+
+	if cert.CA() != nil {
+		pool := x509.NewCertPool()
+		pool.AddCert(cert.CA())
+		config.RootCAs = pool
+		config.ClientCAs = pool
+
+		logger.Infof("LXD is in CA mode, only CA-signed certificates will be allowed")
+	}
+
+	config.BuildNameToCertificate()
+	return config
+}

From 2842457436540a0656c3c1cacfde46f4f7473ea5 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 11:39:18 +0000
Subject: [PATCH 017/116] Add actual dqlite backend to cluster.Gateway

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api.go                  |   4 +
 lxd/cluster/gateway.go      | 197 ++++++++++++++++++++++++++++++++++++++++++--
 lxd/cluster/gateway_test.go |  39 +++++++++
 lxd/cluster/raft.go         |   2 +-
 lxd/daemon.go               |   3 +-
 test/includes/lxd.sh        |  11 ++-
 6 files changed, 244 insertions(+), 12 deletions(-)

diff --git a/lxd/api.go b/lxd/api.go
index 94ba9c285..e038e76dc 100644
--- a/lxd/api.go
+++ b/lxd/api.go
@@ -21,6 +21,10 @@ func RestServer(d *Daemon) *http.Server {
 		SyncResponse(true, []string{"/1.0"}).Render(w)
 	})
 
+	for endpoint, f := range d.gateway.HandlerFuncs() {
+		mux.HandleFunc(endpoint, f)
+	}
+
 	for _, c := range api10 {
 		d.createCmd(mux, "1.0", c)
 	}
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 10a560aca..1c8d65924 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -1,15 +1,22 @@
 package cluster
 
 import (
+	"fmt"
 	"net"
+	"net/http"
 	"time"
 
+	"github.com/CanonicalLtd/dqlite"
 	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/hashicorp/raft"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
-	"github.com/mattn/go-sqlite3"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
 )
 
 // NewGateway creates a new Gateway for managing access to the dqlite cluster.
@@ -45,6 +52,10 @@ type Gateway struct {
 	cert    *shared.CertInfo
 	latency float64
 
+	// The raft instance to use for creating the dqlite driver. It's nil if
+	// this LXD node is not supposed to be part of the raft cluster.
+	raft *raftInstance
+
 	// The gRPC server exposing the dqlite driver created by this
 	// gateway. It's nil if this LXD node is not supposed to be part of the
 	// raft cluster.
@@ -59,12 +70,77 @@ type Gateway struct {
 	memoryDial func() (*grpc.ClientConn, error)
 }
 
+// HandlerFuncs returns the HTTP handlers that should be added to the REST API
+// endpoint in order to handle database-related requests.
+//
+// There are two handlers, one for the /internal/raft endpoint and the other
+// for /internal/db, which handle respectively raft and gRPC-SQL requests.
+//
+// These handlers might return 404, either because this LXD node is a
+// non-clustered node not available over the network or because it is not a
+// database node part of the dqlite cluster.
+func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
+	grpc := func(w http.ResponseWriter, r *http.Request) {
+		if g.server == nil || g.memoryDial != nil {
+			http.NotFound(w, r)
+			return
+		}
+
+		// Before actually establishing the gRPC SQL connection, our
+		// dialer probes the node to see if it's currently the leader
+		// (otherwise it tries with another node or retry later).
+		if r.Method == "HEAD" {
+			if g.raft.Raft().State() != raft.Leader {
+				http.Error(w, "503 not leader", http.StatusServiceUnavailable)
+				return
+			}
+			return
+		}
+
+		g.server.ServeHTTP(w, r)
+	}
+	raft := func(w http.ResponseWriter, r *http.Request) {
+		if g.raft == nil || g.raft.HandlerFunc() == nil {
+			http.NotFound(w, r)
+			return
+		}
+		g.raft.HandlerFunc()(w, r)
+	}
+
+	return map[string]http.HandlerFunc{
+		grpcEndpoint: grpc,
+		raftEndpoint: raft,
+	}
+}
+
 // Dialer returns a gRPC dial function that can be used to connect to one of
 // the dqlite nodes via gRPC.
 func (g *Gateway) Dialer() grpcsql.Dialer {
 	return func() (*grpc.ClientConn, error) {
 		// Memory connection.
-		return g.memoryDial()
+		if g.memoryDial != nil {
+			return g.memoryDial()
+		}
+
+		// Network connection.
+		addresses, err := g.raftNodes()
+		if err != nil {
+			return nil, err
+		}
+
+		// FIXME: timeout should be configurable
+		remaining := 10 * time.Second
+		for remaining > 0 {
+			for _, address := range addresses {
+				var conn *grpc.ClientConn
+				conn, err = grpcNetworkDial(address, g.cert, time.Second)
+				if err == nil {
+					return conn, nil
+				}
+			}
+			time.Sleep(250 * time.Millisecond)
+		}
+		return nil, err
 	}
 }
 
@@ -76,19 +152,96 @@ func (g *Gateway) Shutdown() error {
 		// switching between in-memory and network mode.
 		g.memoryDial = nil
 	}
-	return nil
+	if g.raft == nil {
+		return nil
+	}
+	return g.raft.Shutdown()
 }
 
 // Initialize the gateway, creating a new raft factory and gRPC server (if this
 // node is a database node), and a gRPC dialer.
 func (g *Gateway) init() error {
-	g.server = grpcsql.NewServer(&sqlite3.SQLiteDriver{})
-	listener, dial := util.InMemoryNetwork()
-	go g.server.Serve(listener)
-	g.memoryDial = grpcMemoryDial(dial)
+	raft, err := newRaft(g.db, g.cert, g.latency)
+	if err != nil {
+		return errors.Wrap(err, "failed to create raft factory")
+	}
+
+	// If the resulting raft instance is not nil, it means that this node
+	// should serve as database node, so create a dqlite driver to be
+	// exposed over gRPC.
+	if raft != nil {
+		driver, err := dqlite.NewDriver(raft.FSM(), raft.Raft(), dqlite.LogFunc(dqliteLog))
+		if err != nil {
+			return errors.Wrap(err, "failed to create dqlite driver")
+		}
+		server := grpcsql.NewServer(driver)
+		if raft.HandlerFunc() == nil {
+			// If no raft http handler is set, it means we are in
+			// single node mode and we don't have a network
+			// endpoint, so let's spin up a fully in-memory gRPC
+			// server.
+			listener, dial := util.InMemoryNetwork()
+			go server.Serve(listener)
+			g.memoryDial = grpcMemoryDial(dial)
+		}
+
+		g.server = server
+		g.raft = raft
+	}
 	return nil
 }
 
+// Wait for the raft node to become leader. Should only be used by Bootstrap,
+// since we know that we'll self elect.
+func (g *Gateway) waitLeadership() error {
+	for i := 0; i < 20; i++ {
+		if g.raft.raft.State() == raft.Leader {
+			return nil
+		}
+		time.Sleep(250 * time.Millisecond)
+	}
+	return fmt.Errorf("raft node did not self-elect within 5 seconds")
+}
+
+// Return the addresses of the current raft nodes.
+func (g *Gateway) raftNodes() ([]string, error) {
+	var addresses []string
+	err := g.db.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		addresses, err = tx.RaftNodeAddresses()
+		return err
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fetch raft nodes")
+	}
+	return addresses, nil
+}
+
+func grpcNetworkDial(addr string, cert *shared.CertInfo, t time.Duration) (*grpc.ClientConn, error) {
+	config, err := tlsClientConfig(cert)
+	if err != nil {
+		return nil, err
+	}
+
+	// Make a probe HEAD request to check if the target node is the leader.
+	url := fmt.Sprintf("https://%s%s", addr, grpcEndpoint)
+	client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
+	response, err := client.Head(url)
+	if err != nil {
+		return nil, err
+	}
+	if response.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf(response.Status)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), t)
+	defer cancel()
+	options := []grpc.DialOption{
+		grpc.WithTransportCredentials(credentials.NewTLS(config)),
+	}
+	return grpc.DialContext(ctx, addr, options...)
+}
+
 // Convert a raw in-memory dial function into a gRPC one.
 func grpcMemoryDial(dial func() net.Conn) func() (*grpc.ClientConn, error) {
 	options := []grpc.DialOption{
@@ -102,3 +255,33 @@ func grpcMemoryDial(dial func() net.Conn) func() (*grpc.ClientConn, error) {
 		return grpc.Dial("", options...)
 	}
 }
+
+// The LXD API endpoint path that gets routed to a gRPC server handler for
+// performing SQL queries against the dqlite driver running on this node.
+//
+// FIXME: figure out if there's a way to configure the gRPC client to add a
+//        prefix to this url, e.g. /internal/db/protocol.SQL/Conn.
+const grpcEndpoint = "/protocol.SQL/Conn"
+
+// Redirect dqlite's logs to our own logger
+func dqliteLog(level, message string) {
+	if level == "TRACE" {
+		// Ignore TRACE level.
+		//
+		// TODO: lxd has no TRACE level, which is quite verbose in dqlite,
+		//       we'll need to take this level into account if we need to
+		//       do some deep debugging.
+		return
+	}
+
+	switch level {
+	case "DEBUG":
+		logger.Debug(message)
+	case "INFO":
+		logger.Info(message)
+	case "WARN":
+		logger.Warn(message)
+	default:
+		// Ignore any other log level.
+	}
+}
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index 33072e993..cb5c500e2 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -1,10 +1,13 @@
 package cluster_test
 
 import (
+	"net/http"
+	"net/http/httptest"
 	"os"
 	"path/filepath"
 	"testing"
 
+	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
@@ -23,12 +26,48 @@ func TestGateway_Single(t *testing.T) {
 	gateway := newGateway(t, db, cert)
 	defer gateway.Shutdown()
 
+	handlerFuncs := gateway.HandlerFuncs()
+	assert.Len(t, handlerFuncs, 2)
+	for endpoint, f := range handlerFuncs {
+		w := httptest.NewRecorder()
+		r := &http.Request{}
+		f(w, r)
+		assert.Equal(t, 404, w.Code, endpoint)
+	}
+
 	dialer := gateway.Dialer()
 	conn, err := dialer()
 	assert.NoError(t, err)
 	assert.NotNil(t, conn)
 }
 
+// If there's a network address configured, we expose the gRPC endpoint with
+// an HTTP handler.
+func TestGateway_SingleWithNetworkAddress(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	address := server.Listener.Addr().String()
+	setRaftRole(t, db, address)
+
+	gateway := newGateway(t, db, cert)
+	defer gateway.Shutdown()
+
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	driver := grpcsql.NewDriver(gateway.Dialer())
+	conn, err := driver.Open("test.db")
+	require.NoError(t, err)
+	require.NoError(t, conn.Close())
+}
+
 // Create a new test Gateway with the given parameters, and ensure no error
 // happens.
 func newGateway(t *testing.T, db *db.Node, certInfo *shared.CertInfo) *cluster.Gateway {
diff --git a/lxd/cluster/raft.go b/lxd/cluster/raft.go
index 0b24ff8b9..7db15baf9 100644
--- a/lxd/cluster/raft.go
+++ b/lxd/cluster/raft.go
@@ -24,9 +24,9 @@ import (
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/pkg/errors"
-	log15 "gopkg.in/inconshreveable/log15.v2"
 )
 
 // Create a raft instance and all its dependencies, to be used as backend for
diff --git a/lxd/daemon.go b/lxd/daemon.go
index a122a62d0..b3ee58e30 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -405,8 +405,7 @@ func (d *Daemon) init() error {
 	address := daemonConfig["core.https_address"].Get()
 
 	/* Open the cluster database */
-	clusterFilename := filepath.Join(d.os.VarDir, "db.bin")
-	d.cluster, err = db.OpenCluster(clusterFilename, d.gateway.Dialer(), address)
+	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
 	if err != nil {
 		return errors.Wrap(err, "failed to open cluster database")
 	}
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index bc6349a25..bf0eb230e 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -14,6 +14,9 @@ spawn_lxd() {
     storage=${1}
     shift
 
+    # Link to local sqlite with replication patch for dqlite
+    sqlite="$(pwd)/../lxd/sqlite/.libs"
+
     # shellcheck disable=SC2153
     if [ "$LXD_BACKEND" = "random" ]; then
         lxd_backend="$(random_storage_backend)"
@@ -36,7 +39,8 @@ spawn_lxd() {
 
     echo "==> Spawning lxd in ${lxddir}"
     # shellcheck disable=SC2086
-    LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" ${DEBUG-} "$@" 2>&1 &
+
+    LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     # shellcheck disable=SC2153
@@ -82,9 +86,12 @@ respawn_lxd() {
     lxddir=${1}
     shift
 
+    # Link to local sqlite with replication patch for dqlite
+    sqlite="$(pwd)/../lxd/sqlite/.libs"
+
     echo "==> Spawning lxd in ${lxddir}"
     # shellcheck disable=SC2086
-    LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" ${DEBUG-} "$@" 2>&1 &
+    LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     echo "==> Spawned LXD (PID is ${LXD_PID})"

From 03ce33ec6660a042b940a8246e15aa3c5811f39f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 17:54:57 +0000
Subject: [PATCH 018/116] Add APIs to modify the cluster database nodes table

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node.go      | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 lxd/db/node_test.go | 31 +++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 lxd/db/node.go
 create mode 100644 lxd/db/node_test.go

diff --git a/lxd/db/node.go b/lxd/db/node.go
new file mode 100644
index 000000000..ca02779c1
--- /dev/null
+++ b/lxd/db/node.go
@@ -0,0 +1,50 @@
+package db
+
+import (
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
+)
+
+// NodeInfo holds information about a single LXD instance in a cluster.
+type NodeInfo struct {
+	ID            int64  // Stable node identifier
+	Name          string // User-assigned name of the node
+	Address       string // Network address of the node
+	Description   string // Node description (optional)
+	Schema        int    // Schema version of the LXD code running the node
+	APIExtensions int    // Number of API extensions of the LXD code running on the node
+}
+
+// Nodes returns all LXD nodes part of the cluster.
+//
+// If this LXD instance is not clustered, an empty list is returned.
+func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
+	nodes := []NodeInfo{}
+	dest := func(i int) []interface{} {
+		nodes = append(nodes, NodeInfo{})
+		return []interface{}{
+			&nodes[i].ID,
+			&nodes[i].Name,
+			&nodes[i].Address,
+			&nodes[i].Description,
+			&nodes[i].Schema,
+			&nodes[i].APIExtensions,
+		}
+	}
+	stmt := "SELECT id, name, address, description, schema, api_extensions FROM nodes ORDER BY id"
+	err := query.SelectObjects(c.tx, dest, stmt)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fecth nodes")
+	}
+	return nodes, nil
+}
+
+// NodeAdd adds a node to the current list of LXD nodes that are part of the
+// cluster. It returns the ID of the newly inserted row.
+func (c *ClusterTx) NodeAdd(name string, address string) (int64, error) {
+	columns := []string{"name", "address", "schema", "api_extensions"}
+	values := []interface{}{name, address, cluster.SchemaVersion, len(version.APIExtensions)}
+	return query.UpsertObject(c.tx, "nodes", columns, values)
+}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
new file mode 100644
index 000000000..82d3af111
--- /dev/null
+++ b/lxd/db/node_test.go
@@ -0,0 +1,31 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Add a new raft node.
+func TestNodeAdd(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	id, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+
+	nodes, err := tx.Nodes()
+	require.NoError(t, err)
+	require.Len(t, nodes, 1)
+
+	node := nodes[0]
+	assert.Equal(t, "buzz", node.Name)
+	assert.Equal(t, "1.2.3.4:666", node.Address)
+	assert.Equal(t, cluster.SchemaVersion, node.Schema)
+	assert.Equal(t, len(version.APIExtensions), node.APIExtensions)
+}

From 1ff028d15ba51ebc22a074ad47b3cc5aa473b412 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 20:55:25 +0000
Subject: [PATCH 019/116] Conditionally load the server or cluster certificate

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go          |  2 +-
 lxd/util/encryption.go | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index b3ee58e30..78c25e208 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -379,7 +379,7 @@ func (d *Daemon) init() error {
 	}
 
 	/* Setup server certificate */
-	certInfo, err := shared.KeyPairAndCA(d.os.VarDir, "server", shared.CertServer)
+	certInfo, err := util.LoadCert(d.os.VarDir)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/util/encryption.go b/lxd/util/encryption.go
index a015bf514..43e7aecaf 100644
--- a/lxd/util/encryption.go
+++ b/lxd/util/encryption.go
@@ -4,6 +4,10 @@ import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
+	"path/filepath"
+
+	"github.com/lxc/lxd/shared"
+	"github.com/pkg/errors"
 
 	"golang.org/x/crypto/scrypt"
 )
@@ -32,3 +36,18 @@ func PasswordCheck(secret, password string) error {
 
 	return nil
 }
+
+// LoadCert reads the LXD server certificate from the given var dir.
+//
+// If a cluster certificate is found it will be loaded instead.
+func LoadCert(dir string) (*shared.CertInfo, error) {
+	prefix := "server"
+	if shared.PathExists(filepath.Join(dir, "cluster.crt")) {
+		prefix = "cluster"
+	}
+	cert, err := shared.KeyPairAndCA(dir, prefix, shared.CertServer)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to load TLS certificate")
+	}
+	return cert, nil
+}

From c06ddb4c3b55c354d5361249dfccbedc9da03e88 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 12:23:07 +0000
Subject: [PATCH 020/116] Make NewTestOS also setup the testing certificates

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon_integration_test.go | 48 ++++--------------------------------------
 lxd/devlxd_test.go             |  8 +++----
 lxd/main_test.go               |  3 ++-
 lxd/sys/testing.go             | 29 +++++++++++++++++++++++++
 4 files changed, 39 insertions(+), 49 deletions(-)

diff --git a/lxd/daemon_integration_test.go b/lxd/daemon_integration_test.go
index 0f689dfa5..f18c0e78c 100644
--- a/lxd/daemon_integration_test.go
+++ b/lxd/daemon_integration_test.go
@@ -1,9 +1,6 @@
 package main
 
 import (
-	"io/ioutil"
-	"os"
-	"path/filepath"
 	"testing"
 
 	lxd "github.com/lxc/lxd/client"
@@ -32,21 +29,16 @@ func newDaemon(t *testing.T) (*Daemon, func()) {
 	// Logging
 	resetLogger := logging.Testing(t)
 
-	// Test directory
-	dir, err := ioutil.TempDir("", "lxd-integration-test")
-	require.NoError(t, err)
-
-	// Test certificates
-	require.NoError(t, os.Mkdir(filepath.Join(dir, "var"), 0755))
-	require.NoError(t, setupTestCerts(filepath.Join(dir, "var")))
+	// OS
+	os, osCleanup := sys.NewTestOS(t)
 
 	// Daemon
-	daemon := NewDaemon(newConfig(), newOS(dir))
+	daemon := NewDaemon(newConfig(), os)
 	require.NoError(t, daemon.Init())
 
 	cleanup := func() {
 		require.NoError(t, daemon.Stop())
-		require.NoError(t, os.RemoveAll(dir))
+		osCleanup()
 		resetLogger()
 	}
 
@@ -59,35 +51,3 @@ func newConfig() *DaemonConfig {
 		RaftLatency: 0.2,
 	}
 }
-
-// Create a new sys.OS object for testing purposes.
-func newOS(dir string) *sys.OS {
-	return &sys.OS{
-		// FIXME: setting mock mode can be avoided once daemon tasks
-		// are fixed to exit gracefully. See daemon.go.
-		MockMode: true,
-
-		VarDir:   filepath.Join(dir, "var"),
-		CacheDir: filepath.Join(dir, "cache"),
-		LogDir:   filepath.Join(dir, "log"),
-	}
-}
-
-// Populate the given test LXD directory with server certificates.
-//
-// Since generating certificates is CPU intensive, they will be simply
-// symlink'ed from the test/deps/ directory.
-func setupTestCerts(dir string) error {
-	cwd, err := os.Getwd()
-	if err != nil {
-		return err
-	}
-	deps := filepath.Join(cwd, "..", "test", "deps")
-	for _, f := range []string{"server.crt", "server.key"} {
-		err := os.Symlink(filepath.Join(deps, f), filepath.Join(dir, f))
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/lxd/devlxd_test.go b/lxd/devlxd_test.go
index 6a029859e..b3978e1a5 100644
--- a/lxd/devlxd_test.go
+++ b/lxd/devlxd_test.go
@@ -9,6 +9,8 @@ import (
 	"path/filepath"
 	"strings"
 	"testing"
+
+	"github.com/lxc/lxd/lxd/sys"
 )
 
 var testDir string
@@ -38,7 +40,7 @@ func setupDir() error {
 	if err != nil {
 		return err
 	}
-	err = setupTestCerts(testDir)
+	err = sys.SetupTestCerts(testDir)
 	if err != nil {
 		return err
 	}
@@ -129,9 +131,7 @@ func TestCredsSendRecv(t *testing.T) {
  * point where it realizes the pid isn't in a container without crashing).
  */
 func TestHttpRequest(t *testing.T) {
-	if err := setupDir(); err != nil {
-		t.Fatal(err)
-	}
+	setupDir()
 	defer os.RemoveAll(testDir)
 
 	d := DefaultDaemon()
diff --git a/lxd/main_test.go b/lxd/main_test.go
index 5555e199e..2c1acfd54 100644
--- a/lxd/main_test.go
+++ b/lxd/main_test.go
@@ -6,6 +6,7 @@ import (
 	"os"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/sys"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
@@ -20,7 +21,7 @@ func mockStartDaemon() (*Daemon, error) {
 	// Setup test certificates. We re-use the ones already on disk under
 	// the test/ directory, to avoid generating new ones, which is
 	// expensive.
-	err := setupTestCerts(shared.VarPath())
+	err := sys.SetupTestCerts(shared.VarPath())
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/sys/testing.go b/lxd/sys/testing.go
index b0bb8a42a..537fd1c77 100644
--- a/lxd/sys/testing.go
+++ b/lxd/sys/testing.go
@@ -4,6 +4,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"runtime"
 	"testing"
 
 	"github.com/stretchr/testify/require"
@@ -13,16 +14,44 @@ import (
 func NewTestOS(t *testing.T) (*OS, func()) {
 	dir, err := ioutil.TempDir("", "lxd-sys-os-test-")
 	require.NoError(t, err)
+	require.NoError(t, SetupTestCerts(dir))
 
 	cleanup := func() {
 		require.NoError(t, os.RemoveAll(dir))
 	}
 
 	os := &OS{
+		// FIXME: setting mock mode can be avoided once daemon tasks
+		// are fixed to exit gracefully. See daemon.go.
+		MockMode: true,
+
 		VarDir:   dir,
 		CacheDir: filepath.Join(dir, "cache"),
 		LogDir:   filepath.Join(dir, "log"),
 	}
 
+	require.NoError(t, os.Init())
+
 	return os, cleanup
 }
+
+// SetupTestCerts populates the given test LXD directory with server
+// certificates.
+//
+// Since generating certificates is CPU intensive, they will be simply
+// symlink'ed from the test/deps/ directory.
+//
+// FIXME: this function is exported because some tests use it
+//        directly. Eventually we should rework those tests to use NewTestOS
+//        instead.
+func SetupTestCerts(dir string) error {
+	_, filename, _, _ := runtime.Caller(0)
+	deps := filepath.Join(filepath.Dir(filename), "..", "..", "test", "deps")
+	for _, f := range []string{"server.crt", "server.key"} {
+		err := os.Symlink(filepath.Join(deps, f), filepath.Join(dir, f))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}

From c7bec117c2ab5b5039b3775a40f23e8edfff8655 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 12 Oct 2017 18:21:52 +0000
Subject: [PATCH 021/116] Add cluster.Bootstrap

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 166 +++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/membership_test.go | 163 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 329 insertions(+)
 create mode 100644 lxd/cluster/membership.go
 create mode 100644 lxd/cluster/membership_test.go

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
new file mode 100644
index 000000000..89e847e5a
--- /dev/null
+++ b/lxd/cluster/membership.go
@@ -0,0 +1,166 @@
+package cluster
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/pkg/errors"
+)
+
+// Bootstrap turns a non-clustered LXD instance into the first (and leader)
+// node of a new LXD cluster.
+//
+// This instance must already have its core.https_address set and be listening
+// on the associated network address.
+func Bootstrap(state *state.State, gateway *Gateway, name string) error {
+	// Check parameters
+	if name == "" {
+		return fmt.Errorf("node name must not be empty")
+	}
+
+	// Sanity check that there's no leftover cluster certificate
+	for _, basename := range []string{"cluster.crt", "cluster.key", "cluster.ca"} {
+		if shared.PathExists(filepath.Join(state.OS.VarDir, basename)) {
+			return fmt.Errorf("inconsistent state: found leftover cluster certificate")
+		}
+	}
+
+	var address string
+	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+		// Fetch current network address and raft nodes
+		config, err := node.ConfigLoad(tx)
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch node configuration")
+		}
+		address = config.HTTPSAddress()
+
+		// Make sure node-local database state is in order.
+		err = membershipCheckNodeStateForBootstrapOrJoin(tx, address)
+		if err != nil {
+			return err
+		}
+
+		// Add ourselves as first raft node
+		err = tx.RaftNodeFirst(address)
+		if err != nil {
+			return errors.Wrap(err, "failed to insert first raft node")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Insert ourselves into the nodes table.
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Make sure cluster database state is in order.
+		err := membershipCheckClusterStateForBootstrapOrJoin(tx)
+		if err != nil {
+			return err
+		}
+
+		// Add ourselves to the nodes table.
+		_, err = tx.NodeAdd(name, address)
+		if err != nil {
+			return errors.Wrap(err, "failed to insert cluster node")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Shutdown the gateway. This will trash any gRPC SQL connection
+	// against our in-memory dqlite driver and shutdown the associated raft
+	// instance.
+	err = gateway.Shutdown()
+	if err != nil {
+		return errors.Wrap(err, "failed to shutdown gRPC SQL gateway")
+	}
+
+	// Re-initialize the gateway. This will create a new raft factory and
+	// dqlite driver instance, which will be exposed over gRPC by the
+	// gateway handlers.
+	err = gateway.init()
+	if err != nil {
+		return errors.Wrap(err, "failed to re-initialize gRPC SQL gateway")
+	}
+	err = gateway.waitLeadership()
+	if err != nil {
+		return err
+	}
+
+	// The cluster certificates are symlinks against the regular node
+	// certificate.
+	for _, ext := range []string{".crt", ".key", ".ca"} {
+		if ext == ".ca" && !shared.PathExists(filepath.Join(state.OS.VarDir, "server.ca")) {
+			continue
+		}
+		err := os.Symlink("server"+ext, filepath.Join(state.OS.VarDir, "cluster"+ext))
+		if err != nil {
+			return errors.Wrap(err, "failed to create cluster cert symlink")
+		}
+	}
+
+	// Make sure we can actually connect to the cluster database through
+	// the network endpoint. This also makes the Go SQL pooling system
+	// invalidate the old connection, so new queries will be executed over
+	// the new gRPC network connection.
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		_, err := tx.Nodes()
+		return err
+	})
+	if err != nil {
+		return errors.Wrap(err, "cluster database initialization failed")
+	}
+
+	return nil
+}
+
+// Check that node-related preconditions are met for bootstrapping or joining a
+// cluster.
+func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
+	nodes, err := tx.RaftNodes()
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch current raft nodes")
+	}
+
+	hasNetworkAddress := address != ""
+	hasRaftNodes := len(nodes) > 0
+
+	// Sanity check that we're not in an inconsistent situation, where no
+	// network address is set, but still there are entries in the
+	// raft_nodes table.
+	if !hasNetworkAddress && hasRaftNodes {
+		return fmt.Errorf("inconsistent state: found leftover entries in raft_nodes")
+	}
+
+	if !hasNetworkAddress {
+		return fmt.Errorf("no core.https_address config is set on this node")
+	}
+	if hasRaftNodes {
+		return fmt.Errorf("the node is already part of a cluster")
+	}
+
+	return nil
+}
+
+// Check that cluster-related preconditions are met for bootstrapping or
+// joining a cluster.
+func membershipCheckClusterStateForBootstrapOrJoin(tx *db.ClusterTx) error {
+	nodes, err := tx.Nodes()
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch current cluster nodes")
+	}
+	if len(nodes) > 0 {
+		return fmt.Errorf("inconsistent state: found leftover entries in nodes")
+	}
+	return nil
+}
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
new file mode 100644
index 000000000..670aecfad
--- /dev/null
+++ b/lxd/cluster/membership_test.go
@@ -0,0 +1,163 @@
+package cluster_test
+
+import (
+	"io/ioutil"
+	"net/http"
+	"path/filepath"
+	"testing"
+
+	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/mpvl/subtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestBootstrap_UnmetPreconditions(t *testing.T) {
+	cases := []struct {
+		setup func(*membershipFixtures)
+		error string
+	}{
+		{
+			func(f *membershipFixtures) {
+				f.NetworkAddress("1.2.3.4:666")
+				filename := filepath.Join(f.state.OS.VarDir, "cluster.crt")
+				ioutil.WriteFile(filename, []byte{}, 0644)
+			},
+			"inconsistent state: found leftover cluster certificate",
+		},
+		{
+			func(*membershipFixtures) {},
+			"no core.https_address config is set on this node",
+		},
+		{
+			func(f *membershipFixtures) {
+				f.NetworkAddress("1.2.3.4:666")
+				f.RaftNode("5.6.7.8:666")
+			},
+			"the node is already part of a cluster",
+		},
+		{
+			func(f *membershipFixtures) {
+				f.RaftNode("5.6.7.8:666")
+			},
+			"inconsistent state: found leftover entries in raft_nodes",
+		},
+		{
+			func(f *membershipFixtures) {
+				f.NetworkAddress("1.2.3.4:666")
+				f.ClusterNode("5.6.7.8:666")
+			},
+			"inconsistent state: found leftover entries in nodes",
+		},
+	}
+
+	for _, c := range cases {
+		subtest.Run(t, c.error, func(t *testing.T) {
+			state, cleanup := state.NewTestState(t)
+			defer cleanup()
+
+			c.setup(&membershipFixtures{t: t, state: state})
+
+			cert := shared.TestingKeyPair()
+			gateway := newGateway(t, state.Node, cert)
+			defer gateway.Shutdown()
+
+			err := cluster.Bootstrap(state, gateway, "buzz")
+			assert.EqualError(t, err, c.error)
+		})
+	}
+}
+
+func TestBootstrap(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	gateway := newGateway(t, state.Node, cert)
+	defer gateway.Shutdown()
+
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	address := server.Listener.Addr().String()
+	f := &membershipFixtures{t: t, state: state}
+	f.NetworkAddress(address)
+
+	err := cluster.Bootstrap(state, gateway, "buzz")
+	require.NoError(t, err)
+
+	// The node-local database has now an entry in the raft_nodes table
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		require.NoError(t, err)
+		require.Len(t, nodes, 1)
+		assert.Equal(t, int64(1), nodes[0].ID)
+		assert.Equal(t, address, nodes[0].Address)
+		return nil
+	})
+	require.NoError(t, err)
+
+	// The cluster database has now an entry in the nodes table
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		require.Len(t, nodes, 1)
+		assert.Equal(t, "buzz", nodes[0].Name)
+		assert.Equal(t, address, nodes[0].Address)
+		return nil
+	})
+	require.NoError(t, err)
+
+	// The cluster certificate is in place.
+	assert.True(t, shared.PathExists(filepath.Join(state.OS.VarDir, "cluster.crt")))
+
+	// The dqlite driver is now exposed over the network.
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	driver := grpcsql.NewDriver(gateway.Dialer())
+	conn, err := driver.Open("test.db")
+	require.NoError(t, err)
+	require.NoError(t, conn.Close())
+}
+
+// Helper for setting fixtures for Bootstrap tests.
+type membershipFixtures struct {
+	t     *testing.T
+	state *state.State
+}
+
+// Set core.https_address to the given value.
+func (h *membershipFixtures) NetworkAddress(address string) {
+	err := h.state.Node.Transaction(func(tx *db.NodeTx) error {
+		config := map[string]string{
+			"core.https_address": address,
+		}
+		return tx.UpdateConfig(config)
+	})
+	require.NoError(h.t, err)
+}
+
+// Add the given address to the raft_nodes table.
+func (h *membershipFixtures) RaftNode(address string) {
+	err := h.state.Node.Transaction(func(tx *db.NodeTx) error {
+		_, err := tx.RaftNodeAdd(address)
+		return err
+	})
+	require.NoError(h.t, err)
+}
+
+// Add the given address to the nodes table of the cluster database.
+func (h *membershipFixtures) ClusterNode(address string) {
+	err := h.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		_, err := tx.NodeAdd("rusp", address)
+		return err
+	})
+	require.NoError(h.t, err)
+}

From c0880348b88ab8dc79c00ac4ba1b44c089906871 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 19:35:13 +0000
Subject: [PATCH 022/116] Add BootstrapCluster REST API command

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go    |  3 +++
 client/lxd_cluster.go   | 14 ++++++++++++++
 lxd/api_1.0.go          |  1 +
 lxd/api_cluster.go      | 41 +++++++++++++++++++++++++++++++++++++++++
 lxd/api_cluster_test.go | 35 +++++++++++++++++++++++++++++++++++
 shared/api/cluster.go   |  9 +++++++++
 6 files changed, 103 insertions(+)
 create mode 100644 client/lxd_cluster.go
 create mode 100644 lxd/api_cluster.go
 create mode 100644 lxd/api_cluster_test.go
 create mode 100644 shared/api/cluster.go

diff --git a/client/interfaces.go b/client/interfaces.go
index b88fbef2d..53813cc82 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -160,6 +160,9 @@ type ContainerServer interface {
 	DeleteStoragePoolVolume(pool string, volType string, name string) (err error)
 	RenameStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePost) (err error)
 
+	// Cluster functions ("cluster" API extensions)
+	BootstrapCluster(name string) (op *Operation, err error)
+
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
 	RawWebsocket(path string) (conn *websocket.Conn, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
new file mode 100644
index 000000000..672665ccb
--- /dev/null
+++ b/client/lxd_cluster.go
@@ -0,0 +1,14 @@
+package lxd
+
+import "github.com/lxc/lxd/shared/api"
+
+// BootstrapCluster request to bootstrap a new cluster.
+func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
+	cluster := api.ClusterPost{Name: name}
+	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
+	if err != nil {
+		return nil, err
+	}
+
+	return op, nil
+}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index ab48696c8..6c342552b 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -56,6 +56,7 @@ var api10 = []Command{
 	storagePoolVolumesTypeCmd,
 	storagePoolVolumeTypeCmd,
 	serverResourceCmd,
+	clusterCmd,
 }
 
 func api10Get(d *Daemon, r *http.Request) Response {
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
new file mode 100644
index 000000000..4e8bb0419
--- /dev/null
+++ b/lxd/api_cluster.go
@@ -0,0 +1,41 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/shared/api"
+)
+
+var clusterCmd = Command{name: "cluster", post: clusterPost}
+
+func clusterPost(d *Daemon, r *http.Request) Response {
+	req := api.ClusterPost{}
+
+	// Parse the request
+	err := json.NewDecoder(r.Body).Decode(&req)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	// Sanity checks
+	if req.Name == "" {
+		return BadRequest(fmt.Errorf("No name provided"))
+	}
+
+	run := func(op *operation) error {
+		return cluster.Bootstrap(d.State(), d.gateway, req.Name)
+	}
+
+	resources := map[string][]string{}
+	resources["cluster"] = []string{}
+
+	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return OperationResponse(op)
+}
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
new file mode 100644
index 000000000..aa096b9d6
--- /dev/null
+++ b/lxd/api_cluster_test.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+	"fmt"
+	"testing"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/shared"
+	"github.com/stretchr/testify/require"
+)
+
+// An LXD node which is already configured for networking can be converted to a
+// single-node LXD cluster.
+func TestCluster_Bootstrap(t *testing.T) {
+	daemon, cleanup := newDaemon(t)
+	defer cleanup()
+
+	client, err := lxd.ConnectLXDUnix(daemon.UnixSocket(), nil)
+	require.NoError(t, err)
+
+	server, _, err := client.GetServer()
+	require.NoError(t, err)
+
+	port, err := shared.AllocatePort()
+	require.NoError(t, err)
+
+	serverPut := server.Writable()
+	serverPut.Config["core.https_address"] = fmt.Sprintf("localhost:%d", port)
+
+	require.NoError(t, client.UpdateServer(serverPut, ""))
+
+	op, err := client.BootstrapCluster("buzz")
+	require.NoError(t, err)
+	require.NoError(t, op.Wait())
+}
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
new file mode 100644
index 000000000..847264214
--- /dev/null
+++ b/shared/api/cluster.go
@@ -0,0 +1,9 @@
+package api
+
+// ClusterPost represents the fields required to bootstrap or join a LXD
+// cluster.
+//
+// API extension: cluster
+type ClusterPost struct {
+	Name string `json:"name" yaml:"name"`
+}

From feced0f39384558bdf9ab9b5eccb04d2696db37b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 14 Oct 2017 22:17:56 +0000
Subject: [PATCH 023/116] Add support for bootstrapping a cluster in lxd init

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/main_init.go                | 105 +++++++++++++++++++++++++++++++++++++++-
 lxd/main_init_test.go           |  34 ++++++++++++-
 lxd/util/net.go                 |  29 +++++++++++
 test/suites/init_interactive.sh |   1 +
 4 files changed, 167 insertions(+), 2 deletions(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index c08db11f2..65ffc0a22 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -3,6 +3,7 @@ package main
 import (
 	"fmt"
 	"net"
+	"os"
 	"os/exec"
 	"strconv"
 	"strings"
@@ -131,12 +132,29 @@ func (cmd *CmdInit) fillDataAuto(data *cmdInitData, client lxd.ContainerServer,
 // Fill the given configuration data with parameters collected with
 // interactive questions.
 func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerServer, backendsAvailable []string, existingPools []string) error {
+	clustering, err := cmd.askClustering()
+	if err != nil {
+		return err
+	}
 	storage, err := cmd.askStorage(client, existingPools, backendsAvailable)
 	if err != nil {
 		return err
 	}
 	defaultPrivileged := cmd.askDefaultPrivileged()
-	networking := cmd.askNetworking()
+
+	// Ask about networking only if we skipped the clustering questions.
+	var networking *cmdInitNetworkingParams
+	if clustering == nil {
+		networking = cmd.askNetworking()
+	} else {
+		// Re-use the answers to the clustering questions.
+		networking = &cmdInitNetworkingParams{
+			Address:       clustering.Address,
+			Port:          clustering.Port,
+			TrustPassword: clustering.TrustPassword,
+		}
+	}
+
 	imagesAutoUpdate := cmd.askImages()
 	bridge := cmd.askBridge(client)
 
@@ -145,6 +163,8 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 		return fmt.Errorf("LXD managed bridges require \"dnsmasq\". Install it and try again.")
 	}
 
+	cmd.fillDataWithClustering(data, clustering)
+
 	err = cmd.fillDataWithStorage(data, storage, existingPools)
 	if err != nil {
 		return err
@@ -198,6 +218,15 @@ func (cmd *CmdInit) fillDataWithCurrentDefaultProfile(data *cmdInitData, client
 	}
 }
 
+// Fill the given init data with clustering details matching the given
+// clustering parameters.
+func (cmd *CmdInit) fillDataWithClustering(data *cmdInitData, clustering *cmdInitClusteringParams) {
+	if clustering == nil {
+		return
+	}
+	data.Cluster.Name = clustering.Name
+}
+
 // Fill the given init data with a new storage pool structure matching the
 // given storage parameters.
 func (cmd *CmdInit) fillDataWithStorage(data *cmdInitData, storage *cmdInitStorageParams, existingPools []string) error {
@@ -382,6 +411,13 @@ func (cmd *CmdInit) apply(client lxd.ContainerServer, data *cmdInitData) error {
 		return cmd.initConfig(client, data.Config)
 	})
 
+	// Cluster changers
+	if data.Cluster.Name != "" {
+		changers = append(changers, func() (reverter, error) {
+			return cmd.initCluster(client, data.Cluster)
+		})
+	}
+
 	// Storage pool changers
 	for i := range data.Pools {
 		pool := data.Pools[i] // Local variable for the closure
@@ -465,6 +501,17 @@ func (cmd *CmdInit) initConfig(client lxd.ContainerServer, config map[string]int
 	return reverter, nil
 }
 
+// Turn on clustering.
+func (cmd *CmdInit) initCluster(client lxd.ContainerServer, cluster api.ClusterPost) (reverter, error) {
+	var reverter func() error
+	op, err := client.BootstrapCluster(cluster.Name)
+	if err != nil {
+		return nil, err
+	}
+	op.Wait()
+	return reverter, nil
+}
+
 // Create or update a single pool, and return a revert function in case of success.
 func (cmd *CmdInit) initPool(client lxd.ContainerServer, pool api.StoragePoolsPost) (reverter, error) {
 	var reverter func() error
@@ -669,6 +716,52 @@ func (cmd *CmdInit) profileDeviceAlreadyExists(profile *api.ProfilesPost, device
 	return nil
 }
 
+// Ask if the user wants to enable clustering
+func (cmd *CmdInit) askClustering() (*cmdInitClusteringParams, error) {
+	askWants := "Would you like to use LXD clustering? (yes/no) [default=no]: "
+	if !cmd.Context.AskBool(askWants, "no") {
+		return nil, nil
+	}
+
+	params := &cmdInitClusteringParams{}
+
+	// Node name
+	hostname, err := os.Hostname()
+	if err != nil {
+		hostname = "lxd"
+	}
+	askName := fmt.Sprintf(
+		"What name should be used to identify this node in the cluster? [default=%s]: ",
+		hostname)
+	params.Name = cmd.Context.AskString(askName, hostname, nil)
+
+	// Network address
+	address := util.NetworkInterfaceAddress()
+	askAddress := fmt.Sprintf(
+		"What IP address or DNS name should be used to reach this node? [default=%s]: ",
+		address)
+	address = util.CanonicalNetworkAddress(cmd.Context.AskString(askAddress, address, nil))
+	host, port, err := net.SplitHostPort(address)
+	if err != nil {
+		return nil, err
+	}
+	portN, err := strconv.Atoi(port)
+	if err != nil {
+		return nil, err
+	}
+	params.Address = host
+	params.Port = int64(portN)
+
+	// Join existing cluster
+	if !cmd.Context.AskBool("Are you joining an existing cluster? (yes/no) [default=no]: ", "no") {
+		params.TrustPassword = cmd.Context.AskPassword(
+			"Trust password for new clients: ", cmd.PasswordReader)
+		return params, nil
+	}
+
+	return nil, fmt.Errorf("joining cluster not yet implemented")
+}
+
 // Ask if the user wants to create a new storage pool, and return
 // the relevant parameters if so.
 func (cmd *CmdInit) askStorage(client lxd.ContainerServer, existingPools []string, availableBackends []string) (*cmdInitStorageParams, error) {
@@ -939,6 +1032,16 @@ type cmdInitData struct {
 	Pools         []api.StoragePoolsPost `yaml:"storage_pools"`
 	Networks      []api.NetworksPost
 	Profiles      []api.ProfilesPost
+	Cluster       api.ClusterPost
+}
+
+// Parameters needed when enabling clustering in interactive mode.
+type cmdInitClusteringParams struct {
+	Name          string // Name of the new node
+	Address       string // Network address of the new node
+	Port          int64  // Network port of the new node
+	Join          string // Network address of existing node to join.
+	TrustPassword string // Trust password
 }
 
 // Parameters needed when creating a storage pool in interactive or auto
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 378927efd..36829b5d4 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -115,6 +115,26 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveHTTPSAddressAndTrustPasswo
 	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
 }
 
+// Enable clustering interactively.
+func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClustering() {
+	suite.command.PasswordReader = func(int) ([]byte, error) {
+		return []byte("sekret"), nil
+	}
+	port, err := shared.AllocatePort()
+	suite.Req.Nil(err)
+	answers := &cmdInitAnswers{
+		WantClustering: true,
+		ClusterName:    "buzz",
+		ClusterAddress: fmt.Sprintf("127.0.0.1:%d", port),
+	}
+	answers.Render(suite.streams)
+
+	suite.Req.Nil(suite.command.Run())
+	state := suite.d.State()
+	certfile := filepath.Join(state.OS.VarDir, "cluster.crt")
+	suite.Req.True(shared.PathExists(certfile))
+}
+
 // Pass network address and trust password via command line arguments.
 func (suite *cmdInitTestSuite) TestCmdInit_AutoHTTPSAddressAndTrustPassword() {
 	port, err := shared.AllocatePort()
@@ -631,6 +651,10 @@ func (suite *cmdInitTestSuite) TestCmdInit_ProfilesPreseedUpdate() {
 // Convenience for building the input text a user would enter for a certain
 // sequence of answers.
 type cmdInitAnswers struct {
+	WantClustering           bool
+	WantJoinCluster          bool
+	ClusterName              string
+	ClusterAddress           string
 	WantStoragePool          bool
 	WantAvailableOverNetwork bool
 	BindToAddress            string
@@ -645,8 +669,16 @@ type cmdInitAnswers struct {
 // Render the input text the user would type for the desired answers, populating
 // the stdin of the given streams.
 func (answers *cmdInitAnswers) Render(streams *cmd.MemoryStreams) {
+	streams.InputAppendBoolAnswer(answers.WantClustering)
+	if answers.WantClustering {
+		streams.InputAppendLine(answers.ClusterName)
+		streams.InputAppendLine(answers.ClusterAddress)
+		streams.InputAppendBoolAnswer(answers.WantJoinCluster)
+	}
 	streams.InputAppendBoolAnswer(answers.WantStoragePool)
-	streams.InputAppendBoolAnswer(answers.WantAvailableOverNetwork)
+	if !answers.WantClustering {
+		streams.InputAppendBoolAnswer(answers.WantAvailableOverNetwork)
+	}
 	if answers.WantAvailableOverNetwork {
 		streams.InputAppendLine(answers.BindToAddress)
 		streams.InputAppendLine(answers.BindToPort)
diff --git a/lxd/util/net.go b/lxd/util/net.go
index ae5913652..aca772547 100644
--- a/lxd/util/net.go
+++ b/lxd/util/net.go
@@ -93,3 +93,32 @@ func ServerTLSConfig(cert *shared.CertInfo) *tls.Config {
 	config.BuildNameToCertificate()
 	return config
 }
+
+// NetworkInterfaceAddress returns the first non-loopback address of any of the
+// system network interfaces.
+//
+// Return the empty string if none is found.
+func NetworkInterfaceAddress() string {
+	ifaces, err := net.Interfaces()
+	if err != nil {
+		return ""
+	}
+	for _, iface := range ifaces {
+		if shared.IsLoopback(&iface) {
+			continue
+		}
+		addrs, err := iface.Addrs()
+		if err != nil {
+			continue
+		}
+		if len(addrs) == 0 {
+			continue
+		}
+		addr, ok := addrs[0].(*net.IPNet)
+		if !ok {
+			continue
+		}
+		return addr.IP.String()
+	}
+	return ""
+}
diff --git a/test/suites/init_interactive.sh b/test/suites/init_interactive.sh
index 0e70663b6..ab7ec40db 100644
--- a/test/suites/init_interactive.sh
+++ b/test/suites/init_interactive.sh
@@ -16,6 +16,7 @@ test_init_interactive() {
     fi
 
     cat <<EOF | lxd init
+no
 yes
 my-storage-pool
 dir

From 07ab4412cf0012dcae527bc74dbf9113b36f8f0d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 09:23:03 +0000
Subject: [PATCH 024/116] Add cluster.Accept to accept a new cluster node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 119 ++++++++++++++++++++++++++++++++++++++---
 lxd/cluster/membership_test.go | 113 ++++++++++++++++++++++++++++++++++++++
 shared/cert.go                 |  10 ++++
 3 files changed, 236 insertions(+), 6 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 89e847e5a..f137dd26d 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
@@ -23,15 +24,13 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 		return fmt.Errorf("node name must not be empty")
 	}
 
-	// Sanity check that there's no leftover cluster certificate
-	for _, basename := range []string{"cluster.crt", "cluster.key", "cluster.ca"} {
-		if shared.PathExists(filepath.Join(state.OS.VarDir, basename)) {
-			return fmt.Errorf("inconsistent state: found leftover cluster certificate")
-		}
+	err := membershipCheckNoLeftoverClusterCert(state.OS.VarDir)
+	if err != nil {
+		return err
 	}
 
 	var address string
-	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
 		// Fetch current network address and raft nodes
 		config, err := node.ConfigLoad(tx)
 		if err != nil {
@@ -124,6 +123,66 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 	return nil
 }
 
+// Accept a new node and add it to the cluster.
+//
+// This instance must already be clustered.
+//
+// Return an updated list of raft database nodes (possibly including the newly
+// accepted node).
+func Accept(state *state.State, name, address string, schema, api int) ([]db.RaftNode, error) {
+	// Check parameters
+	if name == "" {
+		return nil, fmt.Errorf("node name must not be empty")
+	}
+	if address == "" {
+		return nil, fmt.Errorf("node address must not be empty")
+	}
+
+	// Insert the new node into the nodes table.
+	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Check that the node can be accepted with these parameters.
+		err := membershipCheckClusterStateForAccept(tx, name, address, schema, api)
+		if err != nil {
+			return err
+		}
+		// Add the new node
+		_, err = tx.NodeAdd(name, address)
+		if err != nil {
+			return errors.Wrap(err, "failed to insert first raft node")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Possibly insert the new node into the raft_nodes table (if we have
+	// less than 3 database nodes).
+	var nodes []db.RaftNode
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		nodes, err = tx.RaftNodes()
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch current raft nodes")
+		}
+		if len(nodes) >= membershipMaxRaftNodes {
+			return nil
+		}
+		id, err := tx.RaftNodeAdd(address)
+		if err != nil {
+			return err
+		}
+		nodes = append(nodes, db.RaftNode{ID: id, Address: address})
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return nodes, nil
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
@@ -164,3 +223,51 @@ func membershipCheckClusterStateForBootstrapOrJoin(tx *db.ClusterTx) error {
 	}
 	return nil
 }
+
+// Check that cluster-related preconditions are met for accepting a new node.
+func membershipCheckClusterStateForAccept(tx *db.ClusterTx, name string, address string, schema int, api int) error {
+	nodes, err := tx.Nodes()
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch current cluster nodes")
+	}
+	if len(nodes) == 0 {
+		return fmt.Errorf("clustering not enabled")
+	}
+
+	for _, node := range nodes {
+		if node.Name == name {
+			return fmt.Errorf("cluster already has node with name %s", name)
+		}
+		if node.Address == address {
+			return fmt.Errorf("cluster already has node with address %s", address)
+		}
+		if node.Schema != schema {
+			return fmt.Errorf("schema version mismatch: cluster has %d", node.Schema)
+		}
+		if node.APIExtensions != api {
+			return fmt.Errorf("API version mismatch: cluster has %d", node.APIExtensions)
+		}
+	}
+
+	return nil
+}
+
+// Check that there is no left-over cluster certificate in the LXD var dir of
+// this node.
+func membershipCheckNoLeftoverClusterCert(dir string) error {
+	// Sanity check that there's no leftover cluster certificate
+	for _, basename := range []string{"cluster.crt", "cluster.key", "cluster.ca"} {
+		if shared.PathExists(filepath.Join(dir, basename)) {
+			return fmt.Errorf("inconsistent state: found leftover cluster certificate")
+		}
+	}
+	return nil
+}
+
+// SchemaVersion holds the version of the cluster database schema.
+var SchemaVersion = cluster.SchemaVersion
+
+// We currently aim at having 3 nodes part of the raft dqlite cluster.
+//
+// TODO: this number should probably be configurable.
+const membershipMaxRaftNodes = 3
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 670aecfad..f6e9fe61d 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -1,6 +1,7 @@
 package cluster_test
 
 import (
+	"fmt"
 	"io/ioutil"
 	"net/http"
 	"path/filepath"
@@ -11,6 +12,7 @@ import (
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/version"
 	"github.com/mpvl/subtest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -127,6 +129,117 @@ func TestBootstrap(t *testing.T) {
 	require.NoError(t, conn.Close())
 }
 
+// If pre-conditions are not met, a descriptive error is returned.
+func TestAccept_UnmetPreconditions(t *testing.T) {
+	cases := []struct {
+		name    string
+		address string
+		schema  int
+		api     int
+		setup   func(*membershipFixtures)
+		error   string
+	}{
+		{
+			"buzz",
+			"1.2.3.4:666",
+			cluster.SchemaVersion,
+			len(version.APIExtensions),
+			func(f *membershipFixtures) {},
+			"clustering not enabled",
+		},
+		{
+			"rusp",
+			"1.2.3.4:666",
+			cluster.SchemaVersion,
+			len(version.APIExtensions),
+			func(f *membershipFixtures) {
+				f.ClusterNode("5.6.7.8:666")
+			},
+			"cluster already has node with name rusp",
+		},
+		{
+			"buzz",
+			"5.6.7.8:666",
+			cluster.SchemaVersion,
+			len(version.APIExtensions),
+			func(f *membershipFixtures) {
+				f.ClusterNode("5.6.7.8:666")
+			},
+			"cluster already has node with address 5.6.7.8:666",
+		},
+		{
+			"buzz",
+			"1.2.3.4:666",
+			cluster.SchemaVersion - 1,
+			len(version.APIExtensions),
+			func(f *membershipFixtures) {
+				f.ClusterNode("5.6.7.8:666")
+			},
+			fmt.Sprintf("schema version mismatch: cluster has %d", cluster.SchemaVersion),
+		},
+		{
+			"buzz",
+			"1.2.3.4:666",
+			cluster.SchemaVersion,
+			len(version.APIExtensions) - 1,
+			func(f *membershipFixtures) {
+				f.ClusterNode("5.6.7.8:666")
+			},
+			fmt.Sprintf("API version mismatch: cluster has %d", len(version.APIExtensions)),
+		},
+	}
+	for _, c := range cases {
+		subtest.Run(t, c.error, func(t *testing.T) {
+			state, cleanup := state.NewTestState(t)
+			defer cleanup()
+
+			c.setup(&membershipFixtures{t: t, state: state})
+
+			_, err := cluster.Accept(state, c.name, c.address, c.schema, c.api)
+			assert.EqualError(t, err, c.error)
+		})
+	}
+}
+
+// When a node gets accepted, it gets included in the raft nodes.
+func TestAccept(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	f := &membershipFixtures{t: t, state: state}
+	f.RaftNode("1.2.3.4:666")
+	f.ClusterNode("1.2.3.4:666")
+
+	nodes, err := cluster.Accept(
+		state, "buzz", "5.6.7.8:666", cluster.SchemaVersion, len(version.APIExtensions))
+	assert.NoError(t, err)
+	assert.Len(t, nodes, 2)
+	assert.Equal(t, int64(1), nodes[0].ID)
+	assert.Equal(t, int64(2), nodes[1].ID)
+	assert.Equal(t, "1.2.3.4:666", nodes[0].Address)
+	assert.Equal(t, "5.6.7.8:666", nodes[1].Address)
+}
+
+// If the cluster has already reached its maximum number of raft nodes, the
+// joining node is not included in the returned raft nodes list.
+func TestAccept_MaxRaftNodes(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	f := &membershipFixtures{t: t, state: state}
+	f.RaftNode("1.1.1.1:666")
+	f.RaftNode("2.2.2.2:666")
+	f.RaftNode("3.3.3.3:666")
+	f.ClusterNode("1.2.3.4:666")
+
+	nodes, err := cluster.Accept(
+		state, "buzz", "4.5.6.7:666", cluster.SchemaVersion, len(version.APIExtensions))
+	assert.NoError(t, err)
+	for _, node := range nodes {
+		assert.NotEqual(t, "4.5.6.7:666", node.Address)
+	}
+}
+
 // Helper for setting fixtures for Bootstrap tests.
 type membershipFixtures struct {
 	t     *testing.T
diff --git a/shared/cert.go b/shared/cert.go
index eee7c91a7..4f6f23af4 100644
--- a/shared/cert.go
+++ b/shared/cert.go
@@ -101,6 +101,16 @@ func (c *CertInfo) PublicKey() []byte {
 	return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: data})
 }
 
+// PrivateKey is a convenience to encode the underlying private key.
+func (c *CertInfo) PrivateKey() []byte {
+	key, ok := c.KeyPair().PrivateKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil
+	}
+	data := x509.MarshalPKCS1PrivateKey(key)
+	return pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: data})
+}
+
 // CertKind defines the kind of certificate to generate from scratch in
 // KeyPairAndCA when it's not there.
 //

From 248669f193c3856900b1292d77b624b0cb32e3b2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 19:53:02 +0000
Subject: [PATCH 025/116] Add cluster.Join

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 102 +++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/membership_test.go |  54 ++++++++++++++++++++++
 2 files changed, 156 insertions(+)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index f137dd26d..e923f0e05 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -4,12 +4,17 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"strconv"
+	"time"
 
+	"github.com/hashicorp/raft"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/log15"
+	"github.com/lxc/lxd/shared/logger"
 	"github.com/pkg/errors"
 )
 
@@ -183,6 +188,103 @@ func Accept(state *state.State, name, address string, schema, api int) ([]db.Raf
 	return nodes, nil
 }
 
+// Join makes a non-clustered LXD node join an existing cluster.
+//
+// It's assumed that Accept() was previously called against the target node,
+// which handed back the raft server ID.
+//
+// The cert parameter must contain the keypair/CA material of the cluster being
+// joined.
+func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name string, nodes []db.RaftNode) error {
+	// Check parameters
+	if name == "" {
+		return fmt.Errorf("node name must not be empty")
+	}
+
+	var address string
+	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+		// Fetch current network address and raft nodes
+		config, err := node.ConfigLoad(tx)
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch node configuration")
+		}
+		address = config.HTTPSAddress()
+
+		// Make sure node-local database state is in order.
+		err = membershipCheckNodeStateForBootstrapOrJoin(tx, address)
+		if err != nil {
+			return err
+		}
+
+		// Set the raft nodes list to the one that was returned by Accept().
+		err = tx.RaftNodesReplace(nodes)
+		if err != nil {
+			return errors.Wrap(err, "failed to set raft nodes")
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Shutdown the gateway and wipe any raft data. This will trash any
+	// gRPC SQL connection against our in-memory dqlite driver and shutdown
+	// the associated raft instance.
+	err = gateway.Shutdown()
+	if err != nil {
+		return errors.Wrap(err, "failed to shutdown gRPC SQL gateway")
+	}
+	err = os.RemoveAll(filepath.Join(state.OS.VarDir, "raft"))
+	if err != nil {
+		return errors.Wrap(err, "failed to remove existing raft data")
+	}
+
+	// Re-initialize the gateway. This will create a new raft factory and
+	// dqlite driver instance, which will be exposed over gRPC by the
+	// gateway handlers.
+	gateway.cert = cert
+	err = gateway.init()
+	if err != nil {
+		return errors.Wrap(err, "failed to re-initialize gRPC SQL gateway")
+	}
+
+	// If we are listed among the database nodes, join the raft cluster.
+	id := ""
+	target := ""
+	for _, node := range nodes {
+		if node.Address == address {
+			id = strconv.Itoa(int(node.ID))
+		} else {
+			target = node.Address
+		}
+	}
+	if id != "" {
+		logger.Info(
+			"Joining dqlite raft cluster",
+			log15.Ctx{"id": id, "address": address, "target": target})
+		changer := gateway.raft.MembershipChanger()
+		err := changer.Join(raft.ServerID(id), raft.ServerAddress(target), 5*time.Second)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Make sure we can actually connect to the cluster database through
+	// the network endpoint. This also makes the Go SQL pooling system
+	// invalidate the old connection, so new queries will be executed over
+	// the new gRPC network connection.
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		_, err := tx.Nodes()
+		return err
+	})
+	if err != nil {
+		return errors.Wrap(err, "cluster database initialization failed")
+	}
+
+	return nil
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index f6e9fe61d..70e3ad224 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -240,6 +240,60 @@ func TestAccept_MaxRaftNodes(t *testing.T) {
 	}
 }
 
+func TestJoin(t *testing.T) {
+	// Setup a target node running as leader of a cluster.
+	targetState, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	targetCert := shared.TestingKeyPair()
+	targetGateway := newGateway(t, targetState.Node, targetCert)
+	defer targetGateway.Shutdown()
+
+	targetMux := http.NewServeMux()
+	targetServer := newServer(targetCert, targetMux)
+	defer targetServer.Close()
+
+	for path, handler := range targetGateway.HandlerFuncs() {
+		targetMux.HandleFunc(path, handler)
+	}
+
+	targetAddress := targetServer.Listener.Addr().String()
+	targetF := &membershipFixtures{t: t, state: targetState}
+	targetF.NetworkAddress(targetAddress)
+
+	err := cluster.Bootstrap(targetState, targetGateway, "buzz")
+	require.NoError(t, err)
+
+	// Setup a joining node
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingAltKeyPair()
+	gateway := newGateway(t, state.Node, cert)
+	defer gateway.Shutdown()
+
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	address := server.Listener.Addr().String()
+	f := &membershipFixtures{t: t, state: state}
+	f.NetworkAddress(address)
+
+	// Accept the joining node.
+	nodes, err := cluster.Accept(
+		targetState, "rusp", address, cluster.SchemaVersion, len(version.APIExtensions))
+	require.NoError(t, err)
+
+	// Actually join the cluster.
+	err = cluster.Join(state, gateway, targetCert, "rusp", nodes)
+	require.NoError(t, err)
+}
+
 // Helper for setting fixtures for Bootstrap tests.
 type membershipFixtures struct {
 	t     *testing.T

From e8fcd121c4d369ea4d75b926ea1ac1a2e55dcefa Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 19:56:02 +0000
Subject: [PATCH 026/116] Add LXC client AcceptNode() and JoinCluster()

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go     |   2 +
 client/lxd_cluster.go    |  37 +++++++++++++++-
 lxd/api_cluster.go       | 113 ++++++++++++++++++++++++++++++++++++++++++++++-
 lxd/endpoints/network.go |   9 ++++
 lxd/main_init.go         |   5 ++-
 lxd/util/encryption.go   |  24 ++++++++++
 shared/api/cluster.go    |  21 ++++++++-
 7 files changed, 207 insertions(+), 4 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 53813cc82..a0856fe9d 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -162,6 +162,8 @@ type ContainerServer interface {
 
 	// Cluster functions ("cluster" API extensions)
 	BootstrapCluster(name string) (op *Operation, err error)
+	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
+	JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (op *Operation, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 672665ccb..4c8de7bad 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -2,7 +2,7 @@ package lxd
 
 import "github.com/lxc/lxd/shared/api"
 
-// BootstrapCluster request to bootstrap a new cluster.
+// BootstrapCluster requests to bootstrap a new cluster.
 func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
 	cluster := api.ClusterPost{Name: name}
 	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
@@ -12,3 +12,38 @@ func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
 
 	return op, nil
 }
+
+// AcceptNode requests to accept a new node into the cluster.
+func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, apiExt int) (*api.ClusterNodeAccepted, error) {
+	cluster := api.ClusterPost{
+		Name:           name,
+		Address:        address,
+		Schema:         schema,
+		API:            apiExt,
+		TargetPassword: targetPassword,
+	}
+	info := &api.ClusterNodeAccepted{}
+	_, err := r.queryStruct("POST", "/cluster", cluster, "", &info)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return info, nil
+}
+
+// JoinCluster requests to join an existing cluster.
+func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (*Operation, error) {
+	cluster := api.ClusterPost{
+		TargetAddress:  targetAddress,
+		TargetPassword: targetPassword,
+		TargetCert:     targetCert,
+		Name:           name,
+	}
+	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
+	if err != nil {
+		return nil, err
+	}
+
+	return op, nil
+}
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 4e8bb0419..6cfcd4606 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -5,11 +5,16 @@ import (
 	"fmt"
 	"net/http"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
 )
 
-var clusterCmd = Command{name: "cluster", post: clusterPost}
+var clusterCmd = Command{name: "cluster", untrustedPost: true, post: clusterPost}
 
 func clusterPost(d *Daemon, r *http.Request) Response {
 	req := api.ClusterPost{}
@@ -25,10 +30,116 @@ func clusterPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("No name provided"))
 	}
 
+	// Depending on the provided parameters we either bootstrap a brand new
+	// cluster with this node as first node, or accept a node into our
+	// cluster, or perform a request to join a given cluster.
+	trusted := d.checkTrustedClient(r) == nil
+	if req.Address == "" && req.TargetAddress == "" {
+		// Bootstrapping a node requires the client to be trusted.
+		if !trusted {
+			return Forbidden
+		}
+		return clusterPostBootstrap(d, req)
+	} else if req.TargetAddress == "" {
+		return clusterPostAccept(d, req)
+	} else {
+		// Joining an existing cluster requires the client to be
+		// trusted.
+		if !trusted {
+			return Forbidden
+		}
+		return clusterPostJoin(d, req)
+	}
+}
+
+func clusterPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 	run := func(op *operation) error {
 		return cluster.Bootstrap(d.State(), d.gateway, req.Name)
 	}
+	resources := map[string][]string{}
+	resources["cluster"] = []string{}
+
+	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return OperationResponse(op)
+}
+
+func clusterPostAccept(d *Daemon, req api.ClusterPost) Response {
+	// Accepting a node requires the client to provide the correct
+	// trust password.
+	secret := daemonConfig["core.trust_password"].Get()
+	if util.PasswordCheck(secret, req.TargetPassword) != nil {
+		return Forbidden
+	}
+	nodes, err := cluster.Accept(d.State(), req.Name, req.Address, req.Schema, req.API)
+	if err != nil {
+		return BadRequest(err)
+	}
+	accepted := api.ClusterNodeAccepted{
+		RaftNodes:  make([]api.RaftNode, len(nodes)),
+		PrivateKey: d.endpoints.NetworkPrivateKey(),
+	}
+	for i, node := range nodes {
+		accepted.RaftNodes[i].ID = node.ID
+		accepted.RaftNodes[i].Address = node.Address
+	}
+	return SyncResponse(true, accepted)
+}
 
+func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
+	// Make sure basic pre-conditions are met.
+	if len(req.TargetCert) == 0 {
+		return BadRequest(fmt.Errorf("No target cluster node certificate provided"))
+	}
+	address := daemonConfig["core.https_address"].Get()
+	if address == "" {
+		return BadRequest(fmt.Errorf("No core.https_address config key is set on this node"))
+	}
+
+	// Client parameters to connect to the target cluster node.
+	args := &lxd.ConnectionArgs{
+		TLSServerCert: string(req.TargetCert),
+		TLSCA:         string(req.TargetCA),
+	}
+
+	// Asynchronously join the cluster.
+	run := func(op *operation) error {
+		// First request for this node to be added to the list of
+		// cluster nodes.
+		client, err := lxd.ConnectLXD(req.TargetAddress, args)
+		if err != nil {
+			return err
+		}
+		info, err := client.AcceptNode(
+			req.TargetPassword, req.Name, address, cluster.SchemaVersion,
+			len(version.APIExtensions))
+		if err != nil {
+			return errors.Wrap(err, "failed to request to add node")
+		}
+
+		// Update our TLS configuration using the returned cluster certificate.
+		err = util.WriteCert(d.os.VarDir, "cluster", req.TargetCert, info.PrivateKey, req.TargetCA)
+		if err != nil {
+			return errors.Wrap(err, "failed to save cluster certificate")
+		}
+		cert, err := util.LoadCert(d.os.VarDir)
+		if err != nil {
+			return errors.Wrap(err, "failed to parse cluster certificate")
+		}
+		d.endpoints.NetworkUpdateCert(cert)
+
+		// Update local setup and possibly join the raft dqlite
+		// cluster.
+		nodes := make([]db.RaftNode, len(info.RaftNodes))
+		for i, node := range info.RaftNodes {
+			nodes[i].ID = node.ID
+			nodes[i].Address = node.Address
+		}
+		return cluster.Join(d.State(), d.gateway, cert, req.Name, nodes)
+	}
 	resources := map[string][]string{}
 	resources["cluster"] = []string{}
 
diff --git a/lxd/endpoints/network.go b/lxd/endpoints/network.go
index 5da1bc573..6d6ddb42d 100644
--- a/lxd/endpoints/network.go
+++ b/lxd/endpoints/network.go
@@ -22,6 +22,15 @@ func (e *Endpoints) NetworkPublicKey() []byte {
 	return e.cert.PublicKey()
 }
 
+// NetworkPrivateKey returns the private key of the TLS certificate used by the
+// network endpoint.
+func (e *Endpoints) NetworkPrivateKey() []byte {
+	e.mu.RLock()
+	defer e.mu.RUnlock()
+
+	return e.cert.PrivateKey()
+}
+
 // NetworkAddress returns the network addresss of the network endpoint, or an
 // empty string if there's no network endpoint
 func (e *Endpoints) NetworkAddress() string {
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 65ffc0a22..e7f76f6c2 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -508,7 +508,10 @@ func (cmd *CmdInit) initCluster(client lxd.ContainerServer, cluster api.ClusterP
 	if err != nil {
 		return nil, err
 	}
-	op.Wait()
+	err = op.Wait()
+	if err != nil {
+		return nil, err
+	}
 	return reverter, nil
 }
 
diff --git a/lxd/util/encryption.go b/lxd/util/encryption.go
index 43e7aecaf..cb5f939ea 100644
--- a/lxd/util/encryption.go
+++ b/lxd/util/encryption.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
+	"io/ioutil"
 	"path/filepath"
 
 	"github.com/lxc/lxd/shared"
@@ -51,3 +52,26 @@ func LoadCert(dir string) (*shared.CertInfo, error) {
 	}
 	return cert, nil
 }
+
+// WriteCert writes the given material to the appropriate certificate files in
+// the given LXD var directory.
+func WriteCert(dir, prefix string, cert, key, ca []byte) error {
+	err := ioutil.WriteFile(filepath.Join(dir, prefix+".crt"), cert, 0644)
+	if err != nil {
+		return err
+	}
+
+	err = ioutil.WriteFile(filepath.Join(dir, prefix+".key"), key, 0600)
+	if err != nil {
+		return err
+	}
+
+	if ca != nil {
+		err = ioutil.WriteFile(filepath.Join(dir, prefix+".ca"), ca, 0644)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 847264214..4f54d2ada 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -5,5 +5,24 @@ package api
 //
 // API extension: cluster
 type ClusterPost struct {
-	Name string `json:"name" yaml:"name"`
+	Name           string `json:"name" yaml:"name"`
+	Address        string `json:"address" yaml:"address"`
+	Schema         int    `json:"schema" yaml:"schema"`
+	API            int    `json:"api" yaml:"api"`
+	TargetAddress  string `json:"target_address" yaml:"target_address"`
+	TargetCert     []byte `json:"target_cert" yaml:"target_cert"`
+	TargetCA       []byte `json:"target_ca" yaml:"target_ca"`
+	TargetPassword string `json:"target_password" yaml:"target_password"`
+}
+
+// ClusterNodeAccepted represents the response of a request to join a cluster.
+type ClusterNodeAccepted struct {
+	RaftNodes  []RaftNode `json:"raft_nodes" yaml:"raft_nodes"`
+	PrivateKey []byte     `json:"private_key" yaml:"private_key"`
+}
+
+// RaftNode represents a LXD node that is part of the dqlite raft cluster.
+type RaftNode struct {
+	ID      int64  `json:"id" yaml:"id"`
+	Address string `json:"address" yaml:"address"`
 }

From 9c631d8425f0cc9f3d648b767ae9c00955865621 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 16 Oct 2017 21:39:02 +0000
Subject: [PATCH 027/116] Change lxd init to support joining a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go |  2 +-
 lxd/main_init.go   | 95 ++++++++++++++++++++++++++++++++++++++++++------------
 2 files changed, 75 insertions(+), 22 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 6cfcd4606..62d9fab0e 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -109,7 +109,7 @@ func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
 	run := func(op *operation) error {
 		// First request for this node to be added to the list of
 		// cluster nodes.
-		client, err := lxd.ConnectLXD(req.TargetAddress, args)
+		client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", req.TargetAddress), args)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/main_init.go b/lxd/main_init.go
index e7f76f6c2..131b17810 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"encoding/pem"
 	"fmt"
 	"net"
 	"os"
@@ -136,17 +137,31 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 	if err != nil {
 		return err
 	}
-	storage, err := cmd.askStorage(client, existingPools, backendsAvailable)
-	if err != nil {
-		return err
-	}
-	defaultPrivileged := cmd.askDefaultPrivileged()
 
-	// Ask about networking only if we skipped the clustering questions.
+	// Ask to create basic entities only if we are not joining an existing
+	// cluster.
+	var storage *cmdInitStorageParams
+	var defaultPrivileged int
 	var networking *cmdInitNetworkingParams
-	if clustering == nil {
-		networking = cmd.askNetworking()
-	} else {
+	var imagesAutoUpdate bool
+	var bridge *cmdInitBridgeParams
+
+	if clustering == nil || clustering.TargetAddress == "" {
+		storage, err = cmd.askStorage(client, existingPools, backendsAvailable)
+		if err != nil {
+			return err
+		}
+		defaultPrivileged = cmd.askDefaultPrivileged()
+
+		// Ask about networking only if we skipped the clustering questions.
+		if clustering == nil {
+			networking = cmd.askNetworking()
+		}
+
+		imagesAutoUpdate = cmd.askImages()
+		bridge = cmd.askBridge(client)
+	}
+	if clustering != nil {
 		// Re-use the answers to the clustering questions.
 		networking = &cmdInitNetworkingParams{
 			Address:       clustering.Address,
@@ -155,9 +170,6 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 		}
 	}
 
-	imagesAutoUpdate := cmd.askImages()
-	bridge := cmd.askBridge(client)
-
 	_, err = exec.LookPath("dnsmasq")
 	if err != nil && bridge != nil {
 		return fmt.Errorf("LXD managed bridges require \"dnsmasq\". Install it and try again.")
@@ -225,6 +237,9 @@ func (cmd *CmdInit) fillDataWithClustering(data *cmdInitData, clustering *cmdIni
 		return
 	}
 	data.Cluster.Name = clustering.Name
+	data.Cluster.TargetAddress = clustering.TargetAddress
+	data.Cluster.TargetCert = clustering.TargetCert
+	data.Cluster.TargetPassword = clustering.TargetPassword
 }
 
 // Fill the given init data with a new storage pool structure matching the
@@ -504,9 +519,19 @@ func (cmd *CmdInit) initConfig(client lxd.ContainerServer, config map[string]int
 // Turn on clustering.
 func (cmd *CmdInit) initCluster(client lxd.ContainerServer, cluster api.ClusterPost) (reverter, error) {
 	var reverter func() error
-	op, err := client.BootstrapCluster(cluster.Name)
-	if err != nil {
-		return nil, err
+	var op *lxd.Operation
+	var err error
+	if cluster.TargetAddress == "" {
+		op, err = client.BootstrapCluster(cluster.Name)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		op, err = client.JoinCluster(
+			cluster.TargetAddress, cluster.TargetPassword, cluster.TargetCert, cluster.Name)
+		if err != nil {
+			return nil, err
+		}
 	}
 	err = op.Wait()
 	if err != nil {
@@ -762,7 +787,33 @@ func (cmd *CmdInit) askClustering() (*cmdInitClusteringParams, error) {
 		return params, nil
 	}
 
-	return nil, fmt.Errorf("joining cluster not yet implemented")
+	// Target node address, password and certificate.
+join:
+	params.TargetAddress = cmd.Context.AskString("IP address or FQDN of an existing cluster node: ", "", nil)
+	params.TargetPassword = cmd.Context.AskPassword(
+		"Trust password for the existing cluster: ", cmd.PasswordReader)
+
+	url := fmt.Sprintf("https://%s", params.TargetAddress)
+	certificate, err := shared.GetRemoteCertificate(url)
+	if err != nil {
+		cmd.Context.Output("Error connecting to existing cluster node: %v\n", err)
+		goto join
+	}
+	digest := shared.CertFingerprint(certificate)
+	askFingerprint := fmt.Sprintf("Remote node fingerprint: %s ok (y/n)? ", digest)
+	if !cmd.Context.AskBool(askFingerprint, "") {
+		return nil, fmt.Errorf("Cluster certificate NACKed by user")
+	}
+	params.TargetCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certificate.Raw})
+
+	// Confirm wipe this node
+	askConfirm := ("All existing data is lost when joining a cluster, " +
+		"continue? (yes/no) [default=no] ")
+	if !cmd.Context.AskBool(askConfirm, "") {
+		return nil, fmt.Errorf("User did not confirm erasing data")
+	}
+
+	return params, nil
 }
 
 // Ask if the user wants to create a new storage pool, and return
@@ -1040,11 +1091,13 @@ type cmdInitData struct {
 
 // Parameters needed when enbling clustering in interactive mode.
 type cmdInitClusteringParams struct {
-	Name          string // Name of the new node
-	Address       string // Network address of the new node
-	Port          int64  // Network port of the new node
-	Join          string // Network address of existing node to join.
-	TrustPassword string // Trust password
+	Name           string // Name of the new node
+	Address        string // Network address of the new node
+	Port           int64  // Network port of the new node
+	TrustPassword  string // Trust password
+	TargetAddress  string // Network address of cluster node to join.
+	TargetCert     []byte // Public key of the cluster to join.
+	TargetPassword string // Trust password of the cluster to join.
 }
 
 // Parameters needed when creating a storage pool in interactive or auto

From 8321d1933c9a04783a1cd9985003e67418737c5b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 11:58:56 +0000
Subject: [PATCH 028/116] Add config table to cluster schema

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      |  8 +++++++-
 lxd/db/cluster/update.go      | 14 ++++++++++++++
 lxd/db/cluster/update_test.go | 13 +++++++++++++
 3 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 90a358e96..76302fbf7 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -6,6 +6,12 @@ package cluster
 // modify the database schema, please add a new schema update to update.go
 // and then run 'make update-schema'.
 const freshSchema = `
+CREATE TABLE config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (key)
+);
 CREATE TABLE nodes (
     id INTEGER PRIMARY KEY,
     name TEXT NOT NULL,
@@ -18,5 +24,5 @@ CREATE TABLE nodes (
     UNIQUE (address)
 );
 
-INSERT INTO schema (version, updated_at) VALUES (1, strftime("%s"))
+INSERT INTO schema (version, updated_at) VALUES (2, strftime("%s"))
 `
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 3d43e9b2e..33006db06 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -24,8 +24,22 @@ var SchemaVersion = len(updates)
 
 var updates = map[int]schema.Update{
 	1: updateFromV0,
+	2: updateFromV1,
 }
 
+func updateFromV1(tx *sql.Tx) error {
+	// config table
+	stmt := `
+CREATE TABLE config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (key)
+);
+`
+	_, err := tx.Exec(stmt)
+	return err
+}
 func updateFromV0(tx *sql.Tx) error {
 	// v0..v1 the dawn of clustering
 	stmt := `
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index c80a51574..f637f5083 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -24,3 +24,16 @@ func TestUpdateFromV0(t *testing.T) {
 	_, err = db.Exec("INSERT INTO nodes VALUES (3, 'bar', 'gasp', '1.2.3.4:666', 9, 11), ?)", time.Now())
 	require.Error(t, err)
 }
+
+func TestUpdateFromV1(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO config VALUES (1, 'foo', 'blah')")
+	require.NoError(t, err)
+
+	// Unique constraint on key.
+	_, err = db.Exec("INSERT INTO config VALUES (2, 'foo', 'gosh')")
+	require.Error(t, err)
+}

From 81c113197c810831f542e58c9372aef155490f5a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 12:37:16 +0000
Subject: [PATCH 029/116] Always use the node db to get core.https_address

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go               |  7 ++++++-
 lxd/api_cluster.go           |  6 +++++-
 lxd/daemon.go                |  6 +++++-
 lxd/main_activateifneeded.go |  8 ++++----
 lxd/main_init_test.go        | 16 ++++++++++------
 lxd/node/config.go           | 15 +++++++++++++++
 lxd/node/config_test.go      | 24 ++++++++++++++++++++++++
 7 files changed, 69 insertions(+), 13 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 6c342552b..afd8d97e7 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -9,6 +9,7 @@ import (
 	"gopkg.in/lxc/go-lxc.v2"
 
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -85,7 +86,11 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
-	addresses, err := util.ListenAddresses(daemonConfig["core.https_address"].Get())
+	address, err := node.HTTPSAddress(d.db)
+	if err != nil {
+		return InternalError(err)
+	}
+	addresses, err := util.ListenAddresses(address)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 62d9fab0e..00ff4a0ff 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -8,6 +8,7 @@ import (
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/version"
@@ -94,7 +95,10 @@ func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
 	if len(req.TargetCert) == 0 {
 		return BadRequest(fmt.Errorf("No target cluster node certificate provided"))
 	}
-	address := daemonConfig["core.https_address"].Get()
+	address, err := node.HTTPSAddress(d.db)
+	if err != nil {
+		return InternalError(err)
+	}
 	if address == "" {
 		return BadRequest(fmt.Errorf("No core.https_address config key is set on this node"))
 	}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 78c25e208..5aa6583ab 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -29,6 +29,7 @@ import (
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/endpoints"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/sys"
 	"github.com/lxc/lxd/lxd/task"
@@ -402,7 +403,10 @@ func (d *Daemon) init() error {
 		}
 	}
 
-	address := daemonConfig["core.https_address"].Get()
+	address, err := node.HTTPSAddress(d.db)
+	if err != nil {
+		return errors.Wrap(err, "failed to fetch node address")
+	}
 
 	/* Open the cluster database */
 	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index 4300f96b5..8c08ded18 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/idmap"
 	"github.com/lxc/lxd/shared/logger"
@@ -30,15 +31,14 @@ func cmdActivateIfNeeded(args *Args) error {
 		return err
 	}
 
-	/* Load all config values from the database */
-	err = daemonConfigInit(d.db.DB())
+	/* Load the configured address from the database */
+	address, err := node.HTTPSAddress(d.db)
 	if err != nil {
 		return err
 	}
 
 	// Look for network socket
-	value := daemonConfig["core.https_address"].Get()
-	if value != "" {
+	if address != "" {
 		logger.Debugf("Daemon has core.https_address set, activating...")
 		_, err := lxd.ConnectLXDUnix("", nil)
 		return err
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 36829b5d4..8b3a2ad9a 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 
 	"github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 
 	"github.com/lxc/lxd/shared"
@@ -87,8 +88,9 @@ func (suite *cmdInitTestSuite) TestCmdInit_PreseedHTTPSAddressAndTrustPassword()
 `, port))
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["core.https_address"]
-	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), key.Get())
+	address, err := node.HTTPSAddress(suite.d.db)
+	suite.Req.NoError(err)
+	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
 	secret := daemonConfig["core.trust_password"].Get()
 	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
 }
@@ -109,8 +111,9 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveHTTPSAddressAndTrustPasswo
 
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["core.https_address"]
-	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), key.Get())
+	address, err := node.HTTPSAddress(suite.d.db)
+	suite.Req.NoError(err)
+	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
 	secret := daemonConfig["core.trust_password"].Get()
 	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
 }
@@ -147,8 +150,9 @@ func (suite *cmdInitTestSuite) TestCmdInit_AutoHTTPSAddressAndTrustPassword() {
 
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["core.https_address"]
-	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), key.Get())
+	address, err := node.HTTPSAddress(suite.d.db)
+	suite.Req.NoError(err)
+	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
 	secret := daemonConfig["core.trust_password"].Get()
 	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
 }
diff --git a/lxd/node/config.go b/lxd/node/config.go
index 25f3d1f63..c3fb62990 100644
--- a/lxd/node/config.go
+++ b/lxd/node/config.go
@@ -57,6 +57,21 @@ func (c *Config) Patch(patch map[string]interface{}) error {
 	return c.update(values)
 }
 
+// HTTPSAddress is a convenience for loading the node configuration and
+// returning the value of core.https_address.
+func HTTPSAddress(node *db.Node) (string, error) {
+	var config *Config
+	err := node.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		config, err = ConfigLoad(tx)
+		return err
+	})
+	if err != nil {
+		return "", err
+	}
+	return config.HTTPSAddress(), nil
+}
+
 func (c *Config) update(values map[string]interface{}) error {
 	changed, err := c.m.Change(values)
 	if err != nil {
diff --git a/lxd/node/config_test.go b/lxd/node/config_test.go
index 7a701b204..b7ed60768 100644
--- a/lxd/node/config_test.go
+++ b/lxd/node/config_test.go
@@ -92,3 +92,27 @@ func TestConfig_PatchKeepsValues(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"core.https_address": "127.0.0.1:666"}, values)
 }
+
+// The core.https_address config key is fetched from the db with a new
+// transaction.
+func TestHTTPSAddress(t *testing.T) {
+	nodeDB, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	address, err := node.HTTPSAddress(nodeDB)
+	require.NoError(t, err)
+	assert.Equal(t, "", address)
+
+	err = nodeDB.Transaction(func(tx *db.NodeTx) error {
+		config, err := node.ConfigLoad(tx)
+		require.NoError(t, err)
+		err = config.Replace(map[string]interface{}{"core.https_address": "127.0.0.1:666"})
+		require.NoError(t, err)
+		return nil
+	})
+	require.NoError(t, err)
+
+	address, err = node.HTTPSAddress(nodeDB)
+	require.NoError(t, err)
+	assert.Equal(t, "127.0.0.1:666", address)
+}

From 03db7e9b4ecd859f7ec30d84361aaced302f5752 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 13:43:47 +0000
Subject: [PATCH 030/116] Delete core.https_address from daemonConfig

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go       | 45 ++++++++++++++++++++++++++++++++++++++++++---
 lxd/daemon_config.go | 35 +++++++++++++++++++++++------------
 lxd/daemon_test.go   |  6 ++++--
 lxd/db/config.go     |  5 +++++
 4 files changed, 74 insertions(+), 17 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index afd8d97e7..a4bcc5a06 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -8,6 +8,7 @@ import (
 
 	"gopkg.in/lxc/go-lxc.v2"
 
+	"github.com/lxc/lxd/lxd/config"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
@@ -15,6 +16,7 @@ import (
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/osarch"
 	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
 )
 
 var api10 = []Command{
@@ -153,7 +155,10 @@ func api10Get(d *Daemon, r *http.Request) Response {
 
 	fullSrv := api.Server{ServerUntrusted: srv}
 	fullSrv.Environment = env
-	fullSrv.Config = daemonConfigRender()
+	fullSrv.Config, err = daemonConfigRender(d.State())
+	if err != nil {
+		return InternalError(err)
+	}
 
 	return SyncResponseETag(true, fullSrv, fullSrv.Config)
 }
@@ -164,7 +169,11 @@ func api10Put(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
-	err = util.EtagCheck(r, daemonConfigRender())
+	render, err := daemonConfigRender(d.State())
+	if err != nil {
+		return InternalError(err)
+	}
+	err = util.EtagCheck(r, render)
 	if err != nil {
 		return PreconditionFailed(err)
 	}
@@ -183,7 +192,11 @@ func api10Patch(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
-	err = util.EtagCheck(r, daemonConfigRender())
+	render, err := daemonConfigRender(d.State())
+	if err != nil {
+		return InternalError(err)
+	}
+	err = util.EtagCheck(r, render)
 	if err != nil {
 		return PreconditionFailed(err)
 	}
@@ -208,6 +221,32 @@ func api10Patch(d *Daemon, r *http.Request) Response {
 }
 
 func doApi10Update(d *Daemon, oldConfig map[string]string, req api.ServerPut) Response {
+	// The HTTPS address is the only config key that we want to save in the
+	// node-level database, so handle it here.
+	nodeValues := map[string]interface{}{}
+	address, ok := req.Config["core.https_address"]
+	if ok {
+		nodeValues["core.https_address"] = address
+		delete(req.Config, "core.https_address")
+	}
+	err := d.db.Transaction(func(tx *db.NodeTx) error {
+		trigger := config.Trigger{
+			Key: "core.https_address",
+			Func: func(value string) error {
+				return d.endpoints.NetworkUpdateAddress(value)
+			},
+		}
+		config, err := node.ConfigLoad(tx, trigger)
+		if err != nil {
+			return errors.Wrap(err, "failed to load node config")
+		}
+		err = config.Replace(nodeValues)
+		return err
+	})
+	if err != nil {
+		return InternalError(err)
+	}
+
 	// Deal with special keys
 	for k, v := range req.Config {
 		config := daemonConfig[k]
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index c0b7fa722..b2f90a295 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -15,6 +15,8 @@ import (
 	"golang.org/x/crypto/scrypt"
 
 	dbapi "github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
 )
@@ -180,7 +182,6 @@ func (k *daemonConfigKey) GetInt64() int64 {
 func daemonConfigInit(db *sql.DB) error {
 	// Set all the keys
 	daemonConfig = map[string]*daemonConfigKey{
-		"core.https_address":             {valueType: "string", setter: daemonConfigSetAddress},
 		"core.https_allowed_headers":     {valueType: "string"},
 		"core.https_allowed_methods":     {valueType: "string"},
 		"core.https_allowed_origin":      {valueType: "string"},
@@ -228,7 +229,7 @@ func daemonConfigInit(db *sql.DB) error {
 	return nil
 }
 
-func daemonConfigRender() map[string]interface{} {
+func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 	config := map[string]interface{}{}
 
 	// Turn the config into a JSON-compatible map
@@ -243,7 +244,26 @@ func daemonConfigRender() map[string]interface{} {
 		}
 	}
 
-	return config
+	err := state.Node.Transaction(func(tx *dbapi.NodeTx) error {
+		nodeConfig, err := node.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		for key, value := range nodeConfig.Dump() {
+			// FIXME: we can drop this conditional as soon as we
+			//        migrate all non-node-local keys to the cluster db
+			if key != "core.https_address" {
+				continue
+			}
+			config[key] = value
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return config, nil
 }
 
 func daemonConfigSetPassword(d *Daemon, key string, value string) (string, error) {
@@ -270,15 +290,6 @@ func daemonConfigSetPassword(d *Daemon, key string, value string) (string, error
 	return value, nil
 }
 
-func daemonConfigSetAddress(d *Daemon, key string, value string) (string, error) {
-	err := d.endpoints.NetworkUpdateAddress(value)
-	if err != nil {
-		return "", err
-	}
-
-	return value, nil
-}
-
 func daemonConfigSetMacaroonEndpoint(d *Daemon, key string, value string) (string, error) {
 	err := d.setupExternalAuthentication(value)
 	if err != nil {
diff --git a/lxd/daemon_test.go b/lxd/daemon_test.go
index fa98ec393..9ce47e92b 100644
--- a/lxd/daemon_test.go
+++ b/lxd/daemon_test.go
@@ -20,7 +20,8 @@ func (suite *daemonTestSuite) Test_config_value_set_empty_removes_val() {
 	val := daemonConfig["core.trust_password"].Get()
 	suite.Req.Equal(len(val), 192)
 
-	valMap := daemonConfigRender()
+	valMap, err := daemonConfigRender(d.State())
+	suite.Req.NoError(err)
 	value, present := valMap["core.trust_password"]
 	suite.Req.True(present)
 	suite.Req.Equal(value, true)
@@ -31,7 +32,8 @@ func (suite *daemonTestSuite) Test_config_value_set_empty_removes_val() {
 	val = daemonConfig["core.trust_password"].Get()
 	suite.Req.Equal(val, "")
 
-	valMap = daemonConfigRender()
+	valMap, err = daemonConfigRender(d.State())
+	suite.Req.NoError(err)
 	_, present = valMap["core.trust_password"]
 	suite.Req.False(present)
 }
diff --git a/lxd/db/config.go b/lxd/db/config.go
index e068c75ef..83eff3437 100644
--- a/lxd/db/config.go
+++ b/lxd/db/config.go
@@ -30,6 +30,11 @@ func ConfigValuesGet(db *sql.DB) (map[string]string, error) {
 	for rows.Next() {
 		var key, value string
 		rows.Scan(&key, &value)
+		// FIXME: we can get rid of this special casing as soon as we
+		//        move config keys to the cluster database.
+		if key == "core.https_address" {
+			continue
+		}
 		results[key] = value
 	}
 

From 692c4aca1329e015242908f4ef342c43c65a14cb Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 15:11:10 +0000
Subject: [PATCH 031/116] Add utilities to migrate data from the node db to the
 cluster db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/config.go         |  11 ++
 lxd/db/migration.go      | 101 ++++++++++++++++++
 lxd/db/migration_test.go | 260 +++++++++++++++++++++++++++++++++++++++++++++++
 lxd/db/query/config.go   |   2 +-
 lxd/db/query/expr.go     |   8 +-
 lxd/db/query/objects.go  |   2 +-
 6 files changed, 378 insertions(+), 6 deletions(-)
 create mode 100644 lxd/db/migration.go
 create mode 100644 lxd/db/migration_test.go

diff --git a/lxd/db/config.go b/lxd/db/config.go
index 83eff3437..36136ea5b 100644
--- a/lxd/db/config.go
+++ b/lxd/db/config.go
@@ -17,6 +17,17 @@ func (n *NodeTx) UpdateConfig(values map[string]string) error {
 	return query.UpdateConfig(n.tx, "config", values)
 }
 
+// Config fetches all LXD cluster config keys.
+func (c *ClusterTx) Config() (map[string]string, error) {
+	return query.SelectConfig(c.tx, "config")
+}
+
+// UpdateConfig updates the given LXD cluster configuration keys in the
+// config table. Config keys set to empty values will be deleted.
+func (c *ClusterTx) UpdateConfig(values map[string]string) error {
+	return query.UpdateConfig(c.tx, "config", values)
+}
+
 func ConfigValuesGet(db *sql.DB) (map[string]string, error) {
 	q := "SELECT key, value FROM config"
 	rows, err := dbQuery(db, q)
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
new file mode 100644
index 000000000..af9284d10
--- /dev/null
+++ b/lxd/db/migration.go
@@ -0,0 +1,101 @@
+package db
+
+import (
+	"database/sql"
+	"fmt"
+	"strings"
+
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/pkg/errors"
+)
+
+// LoadPreClusteringData loads all the data that before the introduction of
+// LXD clustering used to live in the node-level database.
+//
+// This is used for performing a one-off data migration when a LXD instance is
+// upgraded from a version without clustering to a version that supports
+// clustering, since in those version all data lives in the cluster database
+// (regardless of whether clustering is actually on or off).
+func LoadPreClusteringData(tx *sql.Tx) (*Dump, error) {
+	// Dump all tables.
+	tables := []string{
+		"config",
+	}
+	dump := &Dump{
+		Schema: map[string][]string{},
+		Data:   map[string][][]interface{}{},
+	}
+	for _, table := range tables {
+		data := [][]interface{}{}
+		stmt := fmt.Sprintf("SELECT * FROM %s", table)
+		rows, err := tx.Query(stmt)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to fetch rows from %s", table)
+		}
+		columns, err := rows.Columns()
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get columns of %s", table)
+		}
+		dump.Schema[table] = columns
+
+		for rows.Next() {
+			values := make([]interface{}, len(columns))
+			row := make([]interface{}, len(columns))
+			for i := range values {
+				row[i] = &values[i]
+			}
+			err := rows.Scan(row...)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to scan row from %s", table)
+			}
+			data = append(data, values)
+		}
+		err = rows.Err()
+		if err != nil {
+			return nil, errors.Wrapf(err, "error while fetching rows from %s", table)
+		}
+
+		dump.Data[table] = data
+	}
+
+	return dump, nil
+}
+
+// ImportPreClusteringData imports the data loaded with LoadPreClusteringData.
+func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
+	tx, err := c.db.Begin()
+	if err != nil {
+		return errors.Wrap(err, "failed to start cluster database transaction")
+	}
+
+	for table, columns := range dump.Schema {
+		stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
+		stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
+		for i, row := range dump.Data[table] {
+			result, err := tx.Exec(stmt, row...)
+			if err != nil {
+				return errors.Wrapf(err, "failed to insert row %d into %s", i, table)
+			}
+			n, err := result.RowsAffected()
+			if err != nil {
+				return errors.Wrapf(err, "no result count for row %d of %s", i, table)
+			}
+			if n != 1 {
+				return fmt.Errorf("could not insert %d int %s", i, table)
+			}
+		}
+	}
+
+	return tx.Commit()
+}
+
+// Dump is a dump of all the user data in lxd.db prior the migration to the
+// cluster db.
+type Dump struct {
+	// Map table names to the names of their columns.
+	Schema map[string][]string
+
+	// Map a table name to all the rows it contains. Each row is a slice
+	// of interfaces.
+	Data map[string][][]interface{}
+}
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
new file mode 100644
index 000000000..0719a8f61
--- /dev/null
+++ b/lxd/db/migration_test.go
@@ -0,0 +1,260 @@
+package db_test
+
+import (
+	"database/sql"
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLoadPreClusteringData(t *testing.T) {
+	tx := newPreClusteringTx(t)
+
+	dump, err := db.LoadPreClusteringData(tx)
+	require.NoError(t, err)
+
+	assert.Equal(t, []string{"id", "key", "value"}, dump.Schema["config"])
+	assert.Len(t, dump.Data["config"], 1)
+	rows := []interface{}{int64(1), []byte("core.https_address"), []byte("1.2.3.4:666")}
+	assert.Equal(t, rows, dump.Data["config"][0])
+}
+
+func TestImportPreClusteringData(t *testing.T) {
+	tx := newPreClusteringTx(t)
+
+	dump, err := db.LoadPreClusteringData(tx)
+	require.NoError(t, err)
+
+	cluster, cleanup := db.NewTestCluster(t)
+	defer cleanup()
+
+	err = cluster.ImportPreClusteringData(dump)
+	require.NoError(t, err)
+
+	cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := tx.Config()
+		require.NoError(t, err)
+		values := map[string]string{"core.https_address": "1.2.3.4:666"}
+		assert.Equal(t, values, config)
+		return nil
+	})
+}
+
+// Return a sql.Tx against a memory database populated with pre-clustering
+// data.
+func newPreClusteringTx(t *testing.T) *sql.Tx {
+	db, err := sql.Open("sqlite3", ":memory:")
+	require.NoError(t, err)
+
+	tx, err := db.Begin()
+	require.NoError(t, err)
+
+	stmts := []string{
+		preClusteringNodeSchema,
+		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
+	}
+	for _, stmt := range stmts {
+		_, err := tx.Exec(stmt)
+		require.NoError(t, err)
+	}
+	return tx
+}
+
+const preClusteringNodeSchema = `
+CREATE TABLE schema (
+    id         INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    version    INTEGER NOT NULL,
+    updated_at DATETIME NOT NULL,
+    UNIQUE (version)
+);
+CREATE TABLE certificates (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint VARCHAR(255) NOT NULL,
+    type INTEGER NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    certificate TEXT NOT NULL,
+    UNIQUE (fingerprint)
+);
+CREATE TABLE config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (key)
+);
+CREATE TABLE "containers" (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    architecture INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    ephemeral INTEGER NOT NULL DEFAULT 0,
+    creation_date DATETIME NOT NULL DEFAULT 0,
+    stateful INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE containers_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, key)
+);
+CREATE TABLE containers_devices (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    type INTEGER NOT NULL default 0,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, name)
+);
+CREATE TABLE containers_devices_config (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_device_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE,
+    UNIQUE (container_device_id, key)
+);
+CREATE TABLE containers_profiles (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    profile_id INTEGER NOT NULL,
+    apply_order INTEGER NOT NULL default 0,
+    UNIQUE (container_id, profile_id),
+    FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE,
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE images (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint VARCHAR(255) NOT NULL,
+    filename VARCHAR(255) NOT NULL,
+    size INTEGER NOT NULL,
+    public INTEGER NOT NULL DEFAULT 0,
+    architecture INTEGER NOT NULL,
+    creation_date DATETIME,
+    expiry_date DATETIME,
+    upload_date DATETIME NOT NULL,
+    cached INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    auto_update INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (fingerprint)
+);
+CREATE TABLE "images_aliases" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    image_id INTEGER NOT NULL,
+    description TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    UNIQUE (name)
+);
+CREATE TABLE images_properties (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE images_source (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    server TEXT NOT NULL,
+    protocol INTEGER NOT NULL,
+    certificate TEXT NOT NULL,
+    alias VARCHAR(255) NOT NULL,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE networks (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE networks_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (network_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+);
+CREATE TABLE patches (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    applied_at DATETIME NOT NULL,
+    UNIQUE (name)
+);
+CREATE TABLE profiles (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE profiles_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value VARCHAR(255),
+    UNIQUE (profile_id, key),
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    type INTEGER NOT NULL default 0,
+    UNIQUE (profile_id, name),
+    FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_device_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (profile_device_id, key),
+    FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE
+);
+CREATE TABLE raft_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    address TEXT NOT NULL,
+    UNIQUE (address)
+);
+CREATE TABLE storage_pools (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    driver VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE storage_pools_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (storage_pool_id, key),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    description TEXT,
+    UNIQUE (storage_pool_id, name, type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_volume_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (storage_volume_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
+);
+
+INSERT INTO schema (version, updated_at) VALUES (37, strftime("%s"))
+`
diff --git a/lxd/db/query/config.go b/lxd/db/query/config.go
index f970a405b..878b6d8f0 100644
--- a/lxd/db/query/config.go
+++ b/lxd/db/query/config.go
@@ -98,7 +98,7 @@ func deleteConfig(tx *sql.Tx, table string, keys []string) error {
 		return nil // Nothing to delete.
 	}
 
-	query := fmt.Sprintf("DELETE FROM %s WHERE key IN %s", table, exprParams(n))
+	query := fmt.Sprintf("DELETE FROM %s WHERE key IN %s", table, Params(n))
 	values := make([]interface{}, n)
 	for i, key := range keys {
 		values[i] = key
diff --git a/lxd/db/query/expr.go b/lxd/db/query/expr.go
index 3f249c173..393bc166b 100644
--- a/lxd/db/query/expr.go
+++ b/lxd/db/query/expr.go
@@ -7,10 +7,10 @@ import (
 	"strings"
 )
 
-// Return a parameters expression with the given number of '?'
-// placeholders. E.g. exprParams(2) -> "(?, ?)". Useful for
-// IN expressions.
-func exprParams(n int) string {
+// Params returns a parameters expression with the given number of '?'
+// placeholders. E.g. Params(2) -> "(?, ?)". Useful for IN and VALUES
+// expressions.
+func Params(n int) string {
 	tokens := make([]string, n)
 	for i := 0; i < n; i++ {
 		tokens[i] = "?"
diff --git a/lxd/db/query/objects.go b/lxd/db/query/objects.go
index f6dcdad09..edb628964 100644
--- a/lxd/db/query/objects.go
+++ b/lxd/db/query/objects.go
@@ -51,7 +51,7 @@ func UpsertObject(tx *sql.Tx, table string, columns []string, values []interface
 
 	stmt := fmt.Sprintf(
 		"INSERT OR REPLACE INTO %s (%s) VALUES %s",
-		table, strings.Join(columns, ", "), exprParams(n))
+		table, strings.Join(columns, ", "), Params(n))
 	result, err := tx.Exec(stmt, values...)
 	if err != nil {
 		return -1, err

From ed2dd7509aa4b9113939d6aac920e61138532dba Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 16:57:13 +0000
Subject: [PATCH 032/116] Migrate node data to cluster db upon startup

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go                | 32 +++++++++++++++++++++-----------
 lxd/db/db.go                 | 33 ++++++++++++++++++++++++++-------
 lxd/db/db_internal_test.go   |  2 +-
 lxd/db/node/update.go        |  4 ++++
 lxd/db/testing.go            |  2 +-
 lxd/main_activateifneeded.go |  7 ++++++-
 lxd/profiles_test.go         |  2 +-
 7 files changed, 60 insertions(+), 22 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 5aa6583ab..1ed521902 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -368,13 +368,7 @@ func (d *Daemon) init() error {
 	}
 
 	/* Initialize the database */
-	err = initializeDbObject(d)
-	if err != nil {
-		return err
-	}
-
-	/* Load all config values from the database */
-	err = daemonConfigInit(d.db.DB())
+	dump, err := initializeDbObject(d)
 	if err != nil {
 		return err
 	}
@@ -428,6 +422,21 @@ func (d *Daemon) init() error {
 		return err
 	}
 
+	/* Migrate the node local data to the cluster database, if needed */
+	if dump != nil {
+		logger.Infof("Migrating data from lxd.db to db.bin")
+		err = d.cluster.ImportPreClusteringData(dump)
+		if err != nil {
+			return fmt.Errorf("Failed to migrate data to db.bin: %v", err)
+		}
+	}
+
+	/* Load all config values from the database */
+	err = daemonConfigInit(d.db.DB())
+	if err != nil {
+		return err
+	}
+
 	/* Read the storage pools */
 	err = SetupStorageDriver(d.State(), false)
 	if err != nil {
@@ -644,7 +653,7 @@ func (d *Daemon) setupExternalAuthentication(authEndpoint string) error {
 }
 
 // Create a database connection and perform any updates needed.
-func initializeDbObject(d *Daemon) error {
+func initializeDbObject(d *Daemon) (*db.Dump, error) {
 	// NOTE: we use the legacyPatches parameter to run a few
 	// legacy non-db updates that were in place before the
 	// patches mechanism was introduced in lxd/patches.go. The
@@ -678,10 +687,11 @@ func initializeDbObject(d *Daemon) error {
 		return nil
 	}
 	var err error
-	d.db, err = db.OpenNode(d.os.VarDir, freshHook, legacy)
+	var dump *db.Dump
+	d.db, dump, err = db.OpenNode(d.os.VarDir, freshHook, legacy)
 	if err != nil {
-		return fmt.Errorf("Error creating database: %s", err)
+		return nil, fmt.Errorf("Error creating database: %s", err)
 	}
 
-	return nil
+	return dump, nil
 }
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 6b4a49b6d..356727be3 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -46,16 +46,35 @@ type Node struct {
 // The legacyPatches parameter is used as a mean to apply the legacy V10, V11,
 // V15, V29 and V30 non-db updates during the database upgrade sequence, to
 // avoid any change in semantics wrt the old logic (see PR #3322).
-func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*LegacyPatch) (*Node, error) {
+//
+// Return the newly created Node object, and a Dump of the pre-clustering data
+// if we're migrating to a cluster-aware version.
+func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*LegacyPatch) (*Node, *Dump, error) {
+	// When updating the node database schema we'll detect if we're
+	// transitioning to the dqlite-based database and dump all the data
+	// before purging the schema. This data will be then imported by the
+	// daemon into the dqlite database.
+	var dump *Dump
+
 	db, err := node.Open(dir)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
-	hook := legacyPatchHook(db, legacyPatches)
+	legacyHook := legacyPatchHook(db, legacyPatches)
+	hook := func(version int, tx *sql.Tx) error {
+		if version == node.UpdateFromPreClustering {
+			var err error
+			dump, err = LoadPreClusteringData(tx)
+			if err != nil {
+				return err
+			}
+		}
+		return legacyHook(version, tx)
+	}
 	initial, err := node.EnsureSchema(db, dir, hook)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	node := &Node{
@@ -66,17 +85,17 @@ func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*Legacy
 	if initial == 0 {
 		err := node.ProfileCreateDefault()
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		if fresh != nil {
 			err := fresh(node)
 			if err != nil {
-				return nil, err
+				return nil, nil, err
 			}
 		}
 	}
 
-	return node, nil
+	return node, dump, nil
 }
 
 // ForLegacyPatches is a aid for the hack in initializeDbObject, which sets
diff --git a/lxd/db/db_internal_test.go b/lxd/db/db_internal_test.go
index bdebdc3e9..9daf779a9 100644
--- a/lxd/db/db_internal_test.go
+++ b/lxd/db/db_internal_test.go
@@ -65,7 +65,7 @@ func (s *dbTestSuite) CreateTestDb() *Node {
 	s.dir, err = ioutil.TempDir("", "lxd-db-test")
 	s.Nil(err)
 
-	db, err := OpenNode(s.dir, nil, nil)
+	db, _, err := OpenNode(s.dir, nil, nil)
 	s.Nil(err)
 	return db
 }
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 95a660202..ce1dd6b85 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -87,6 +87,10 @@ var updates = map[int]schema.Update{
 	37: updateFromV36,
 }
 
+// UpdateFromPreClustering is the last schema version where clustering support
+// was not available, and hence no cluster dqlite database is used.
+const UpdateFromPreClustering = 36
+
 // Schema updates begin here
 
 // Add a raft_nodes table to be used when running in clustered mode. It lists
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 65c5ddcae..9f819f5b0 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -20,7 +20,7 @@ func NewTestNode(t *testing.T) (*Node, func()) {
 	dir, err := ioutil.TempDir("", "lxd-db-test-node-")
 	require.NoError(t, err)
 
-	db, err := OpenNode(dir, nil, nil)
+	db, _, err := OpenNode(dir, nil, nil)
 	require.NoError(t, err)
 
 	cleanup := func() {
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index 8c08ded18..5b43da9dc 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -1,8 +1,10 @@
 package main
 
 import (
+	"database/sql"
 	"fmt"
 	"os"
+	"path/filepath"
 
 	"github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
@@ -26,10 +28,13 @@ func cmdActivateIfNeeded(args *Args) error {
 		return nil
 	}
 
-	err := initializeDbObject(d)
+	// Open the database directly to avoid triggering any initialization
+	// code, in particular the data migration from node to cluster db.
+	sqldb, err := sql.Open("sqlite3", filepath.Join(d.os.VarDir, "lxd.db"))
 	if err != nil {
 		return err
 	}
+	d.db = db.ForLegacyPatches(sqldb)
 
 	/* Load the configured address the database */
 	address, err := node.HTTPSAddress(d.db)
diff --git a/lxd/profiles_test.go b/lxd/profiles_test.go
index b609da126..2f864cee4 100644
--- a/lxd/profiles_test.go
+++ b/lxd/profiles_test.go
@@ -18,7 +18,7 @@ func Test_removing_a_profile_deletes_associated_configuration_entries(t *testing
 	}
 	defer os.RemoveAll(d.os.VarDir)
 
-	err = initializeDbObject(d)
+	_, err = initializeDbObject(d)
 	if err != nil {
 		t.Fatal(err)
 	}

From f46bb041323b7346e3e2be48f91e871fe6209437 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 17:23:43 +0000
Subject: [PATCH 033/116] Load and save config values to cluster db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go       | 14 ++++++++++++--
 lxd/daemon.go        |  2 +-
 lxd/daemon_config.go | 12 ++++++++----
 lxd/db/config.go     | 34 +++-------------------------------
 lxd/patches.go       |  2 +-
 5 files changed, 25 insertions(+), 39 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index a4bcc5a06..dcb357ea1 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -164,7 +164,12 @@ func api10Get(d *Daemon, r *http.Request) Response {
 }
 
 func api10Put(d *Daemon, r *http.Request) Response {
-	oldConfig, err := db.ConfigValuesGet(d.db.DB())
+	var oldConfig map[string]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		oldConfig, err = tx.Config()
+		return err
+	})
 	if err != nil {
 		return SmartError(err)
 	}
@@ -187,7 +192,12 @@ func api10Put(d *Daemon, r *http.Request) Response {
 }
 
 func api10Patch(d *Daemon, r *http.Request) Response {
-	oldConfig, err := db.ConfigValuesGet(d.db.DB())
+	var oldConfig map[string]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		oldConfig, err = tx.Config()
+		return err
+	})
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 1ed521902..030440b92 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -432,7 +432,7 @@ func (d *Daemon) init() error {
 	}
 
 	/* Load all config values from the database */
-	err = daemonConfigInit(d.db.DB())
+	err = daemonConfigInit(d.cluster)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index b2f90a295..6ada99393 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -2,7 +2,6 @@ package main
 
 import (
 	"crypto/rand"
-	"database/sql"
 	"encoding/hex"
 	"fmt"
 	"io"
@@ -130,7 +129,7 @@ func (k *daemonConfigKey) Set(d *Daemon, value string) error {
 	k.currentValue = value
 	daemonConfigLock.Unlock()
 
-	err = dbapi.ConfigValueSet(d.db.DB(), name, value)
+	err = dbapi.ConfigValueSet(d.cluster, name, value)
 	if err != nil {
 		return err
 	}
@@ -179,7 +178,7 @@ func (k *daemonConfigKey) GetInt64() int64 {
 	return ret
 }
 
-func daemonConfigInit(db *sql.DB) error {
+func daemonConfigInit(cluster *dbapi.Cluster) error {
 	// Set all the keys
 	daemonConfig = map[string]*daemonConfigKey{
 		"core.https_allowed_headers":     {valueType: "string"},
@@ -209,7 +208,12 @@ func daemonConfigInit(db *sql.DB) error {
 	}
 
 	// Load the values from the DB
-	dbValues, err := dbapi.ConfigValuesGet(db)
+	var dbValues map[string]string
+	err := cluster.Transaction(func(tx *dbapi.ClusterTx) error {
+		var err error
+		dbValues, err = tx.Config()
+		return err
+	})
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/config.go b/lxd/db/config.go
index 36136ea5b..d76d8188a 100644
--- a/lxd/db/config.go
+++ b/lxd/db/config.go
@@ -1,10 +1,6 @@
 package db
 
-import (
-	"database/sql"
-
-	"github.com/lxc/lxd/lxd/db/query"
-)
+import "github.com/lxc/lxd/lxd/db/query"
 
 // Config fetches all LXD node-level config keys.
 func (n *NodeTx) Config() (map[string]string, error) {
@@ -28,32 +24,8 @@ func (c *ClusterTx) UpdateConfig(values map[string]string) error {
 	return query.UpdateConfig(c.tx, "config", values)
 }
 
-func ConfigValuesGet(db *sql.DB) (map[string]string, error) {
-	q := "SELECT key, value FROM config"
-	rows, err := dbQuery(db, q)
-	if err != nil {
-		return map[string]string{}, err
-	}
-	defer rows.Close()
-
-	results := map[string]string{}
-
-	for rows.Next() {
-		var key, value string
-		rows.Scan(&key, &value)
-		// FIXME: we can get rid of this special casing as soon as we
-		//        move config keys to the cluster database.
-		if key == "core.https_address" {
-			continue
-		}
-		results[key] = value
-	}
-
-	return results, nil
-}
-
-func ConfigValueSet(db *sql.DB, key string, value string) error {
-	tx, err := begin(db)
+func ConfigValueSet(cluster *Cluster, key string, value string) error {
+	tx, err := begin(cluster.db)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/patches.go b/lxd/patches.go
index 932fdb56b..10a35610b 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -2768,7 +2768,7 @@ func patchUpdateFromV15(d *Daemon) error {
 		return err
 	}
 
-	err = daemonConfigInit(d.db.DB())
+	err = daemonConfigInit(d.cluster)
 	if err != nil {
 		return err
 	}

From ba8f35ce12d5ad103f88682e47b82663ac8f6779 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 17:34:43 +0000
Subject: [PATCH 034/116] Add initial cluster.Config machinery

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/config.go      | 94 ++++++++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/config_test.go | 94 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 188 insertions(+)
 create mode 100644 lxd/cluster/config.go
 create mode 100644 lxd/cluster/config_test.go

diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
new file mode 100644
index 000000000..f257c396a
--- /dev/null
+++ b/lxd/cluster/config.go
@@ -0,0 +1,94 @@
+package cluster
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/lxd/config"
+	"github.com/lxc/lxd/lxd/db"
+)
+
+// Config holds cluster-wide configuration values.
+type Config struct {
+	tx *db.ClusterTx // DB transaction the values in this config are bound to.
+	m  config.Map    // Low-level map holding the config values.
+}
+
+// ConfigLoad loads a new Config object with the current cluster configuration
+// values fetched from the database.
+func ConfigLoad(tx *db.ClusterTx) (*Config, error) {
+	// Load current raw values from the database, any error is fatal.
+	values, err := tx.Config()
+	if err != nil {
+		return nil, fmt.Errorf("cannot fetch cluster config from database: %v", err)
+	}
+
+	m, err := config.SafeLoad(ConfigSchema, values)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load cluster config: %v", err)
+	}
+
+	return &Config{tx: tx, m: m}, nil
+}
+
+// ProxyHTTP returns the configured HTTP proxy, if any.
+func (c *Config) ProxyHTTP() string {
+	return c.m.GetString("core.proxy_http")
+}
+
+// Dump current configuration keys and their values. Keys with values matching
+// their defaults are omitted.
+func (c *Config) Dump() map[string]interface{} {
+	return c.m.Dump()
+}
+
+// Replace the current configuration with the given values.
+func (c *Config) Replace(values map[string]interface{}) error {
+	return c.update(values)
+}
+
+// Patch changes only the configuration keys in the given map.
+func (c *Config) Patch(patch map[string]interface{}) error {
+	values := c.Dump() // Use current values as defaults
+	for name, value := range patch {
+		values[name] = value
+	}
+	return c.update(values)
+}
+
+func (c *Config) update(values map[string]interface{}) error {
+	changed, err := c.m.Change(values)
+	if err != nil {
+		return fmt.Errorf("invalid configuration changes: %s", err)
+	}
+
+	err = c.tx.UpdateConfig(changed)
+	if err != nil {
+		return fmt.Errorf("cannot persist configuration changes: %v", err)
+	}
+
+	return nil
+}
+
+// ConfigSchema defines available server configuration keys.
+var ConfigSchema = config.Schema{
+	"core.https_allowed_headers":     {},
+	"core.https_allowed_methods":     {},
+	"core.https_allowed_origin":      {},
+	"core.https_allowed_credentials": {},
+	"core.proxy_http":                {},
+	"core.proxy_https":               {},
+	"core.proxy_ignore_hosts":        {},
+	"core.trust_password":            {},
+	"images.auto_update_cached":      {},
+	"images.auto_update_interval":    {},
+	"images.compression_algorithm":   {},
+	"images.remote_cache_expiry":     {},
+	"storage.lvm_fstype":             {},
+	"storage.lvm_mount_options":      {},
+	"storage.lvm_thinpool_name":      {},
+	"storage.lvm_vg_name":            {},
+	"storage.lvm_volume_size":        {},
+	"storage.zfs_pool_name":          {},
+	"storage.zfs_remove_snapshots":   {},
+	"storage.zfs_use_refquota":       {},
+}
diff --git a/lxd/cluster/config_test.go b/lxd/cluster/config_test.go
new file mode 100644
index 000000000..0da3979d8
--- /dev/null
+++ b/lxd/cluster/config_test.go
@@ -0,0 +1,94 @@
+package cluster_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// The server configuration is initially empty.
+func TestConfigLoad_Initial(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	config, err := cluster.ConfigLoad(tx)
+
+	require.NoError(t, err)
+	assert.Equal(t, map[string]interface{}{}, config.Dump())
+}
+
+// If the database contains invalid keys, they are ignored.
+func TestConfigLoad_IgnoreInvalidKeys(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	err := tx.UpdateConfig(map[string]string{
+		"foo":             "garbage",
+		"core.proxy_http": "foo.bar",
+	})
+	require.NoError(t, err)
+
+	config, err := cluster.ConfigLoad(tx)
+
+	require.NoError(t, err)
+	values := map[string]interface{}{"core.proxy_http": "foo.bar"}
+	assert.Equal(t, values, config.Dump())
+}
+
+// Triggers can be specified to execute custom code on config key changes.
+func TestConfigLoad_Triggers(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	config, err := cluster.ConfigLoad(tx)
+
+	require.NoError(t, err)
+	assert.Equal(t, map[string]interface{}{}, config.Dump())
+}
+
+// If some previously set values are missing from the ones passed to Replace(),
+// they are deleted from the configuration.
+func TestConfig_ReplaceDeleteValues(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	config, err := cluster.ConfigLoad(tx)
+	require.NoError(t, err)
+
+	err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
+	assert.NoError(t, err)
+
+	err = config.Replace(map[string]interface{}{})
+	assert.NoError(t, err)
+
+	assert.Equal(t, "", config.ProxyHTTP())
+
+	values, err := tx.Config()
+	require.NoError(t, err)
+	assert.Equal(t, map[string]string{}, values)
+}
+
+// If some previously set values are missing from the ones passed to Patch(),
+// they are kept as they are.
+func TestConfig_PatchKeepsValues(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	config, err := cluster.ConfigLoad(tx)
+	require.NoError(t, err)
+
+	err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
+	assert.NoError(t, err)
+
+	err = config.Patch(map[string]interface{}{})
+	assert.NoError(t, err)
+
+	assert.Equal(t, "foo.bar", config.ProxyHTTP())
+
+	values, err := tx.Config()
+	require.NoError(t, err)
+	assert.Equal(t, map[string]string{"core.proxy_http": "foo.bar"}, values)
+}

From 4e8d36775426deeff68086781aa3c4226b1cacbc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 17:35:53 +0000
Subject: [PATCH 035/116] Drop legacy node.Config keys

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/node/config.go | 24 ------------------------
 lxd/patches.go     | 12 +++++++++---
 2 files changed, 9 insertions(+), 27 deletions(-)

diff --git a/lxd/node/config.go b/lxd/node/config.go
index c3fb62990..26f06004d 100644
--- a/lxd/node/config.go
+++ b/lxd/node/config.go
@@ -90,28 +90,4 @@ func (c *Config) update(values map[string]interface{}) error {
 var ConfigSchema = config.Schema{
 	// Network address for this LXD server.
 	"core.https_address": {},
-
-	// FIXME: Legacy node-level config values. Will be migrated to
-	//        cluster-config, but we need them here just to avoid
-	//        spurious errors in the logs
-	"core.https_allowed_headers":     {},
-	"core.https_allowed_methods":     {},
-	"core.https_allowed_origin":      {},
-	"core.https_allowed_credentials": {},
-	"core.proxy_http":                {},
-	"core.proxy_https":               {},
-	"core.proxy_ignore_hosts":        {},
-	"core.trust_password":            {},
-	"images.auto_update_cached":      {},
-	"images.auto_update_interval":    {},
-	"images.compression_algorithm":   {},
-	"images.remote_cache_expiry":     {},
-	"storage.lvm_fstype":             {},
-	"storage.lvm_mount_options":      {},
-	"storage.lvm_thinpool_name":      {},
-	"storage.lvm_vg_name":            {},
-	"storage.lvm_volume_size":        {},
-	"storage.zfs_pool_name":          {},
-	"storage.zfs_remove_snapshots":   {},
-	"storage.zfs_use_refquota":       {},
 }
diff --git a/lxd/patches.go b/lxd/patches.go
index 10a35610b..065f193a5 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -2768,13 +2768,19 @@ func patchUpdateFromV15(d *Daemon) error {
 		return err
 	}
 
-	err = daemonConfigInit(d.cluster)
+	vgName := ""
+	err = d.db.Transaction(func(tx *db.NodeTx) error {
+		config, err := tx.Config()
+		if err != nil {
+			return err
+		}
+		vgName = config["storage.lvm_vg_name"]
+		return nil
+	})
 	if err != nil {
 		return err
 	}
 
-	vgName := daemonConfig["storage.lvm_vg_name"].Get()
-
 	for _, cName := range cNames {
 		var lvLinkPath string
 		if strings.Contains(cName, shared.SnapshotDelimiter) {

From 96d48e2e62cbf09ec54692f08fcac4455166d94f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 18:10:25 +0000
Subject: [PATCH 036/116] Load CORS headers settings from the database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api.go            | 45 +++++++++++++++++++++++++++++++++------------
 lxd/cluster/config.go | 22 +++++++++++++++++++++-
 lxd/daemon_config.go  | 14 +++++---------
 3 files changed, 59 insertions(+), 22 deletions(-)

diff --git a/lxd/api.go b/lxd/api.go
index e038e76dc..ba6285ce9 100644
--- a/lxd/api.go
+++ b/lxd/api.go
@@ -2,10 +2,13 @@ package main
 
 import (
 	"net/http"
+	"strings"
 
 	log "github.com/lxc/lxd/shared/log15"
 
 	"github.com/gorilla/mux"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared/logger"
 )
 
@@ -48,32 +51,50 @@ type lxdHttpServer struct {
 }
 
 func (s *lxdHttpServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
-	allowedOrigin := daemonConfig["core.https_allowed_origin"].Get()
+	// Set CORS headers, unless this is an internal or gRPC request.
+	if !strings.HasPrefix(req.URL.Path, "/internal") && !strings.HasPrefix(req.URL.Path, "/protocol.SQL") {
+		<-s.d.setupChan
+		err := s.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			config, err := cluster.ConfigLoad(tx)
+			if err != nil {
+				return err
+			}
+			setCORSHeaders(rw, req, config)
+			return nil
+		})
+		if err != nil {
+			http.Error(rw, err.Error(), http.StatusInternalServerError)
+		}
+	}
+
+	// OPTIONS requests don't need any further processing
+	if req.Method == "OPTIONS" {
+		return
+	}
+
+	// Call the original server
+	s.r.ServeHTTP(rw, req)
+}
+
+func setCORSHeaders(rw http.ResponseWriter, req *http.Request, config *cluster.Config) {
+	allowedOrigin := config.HTTPSAllowedOrigin()
 	origin := req.Header.Get("Origin")
 	if allowedOrigin != "" && origin != "" {
 		rw.Header().Set("Access-Control-Allow-Origin", allowedOrigin)
 	}
 
-	allowedMethods := daemonConfig["core.https_allowed_methods"].Get()
+	allowedMethods := config.HTTPSAllowedMethods()
 	if allowedMethods != "" && origin != "" {
 		rw.Header().Set("Access-Control-Allow-Methods", allowedMethods)
 	}
 
-	allowedHeaders := daemonConfig["core.https_allowed_headers"].Get()
+	allowedHeaders := config.HTTPSAllowedHeaders()
 	if allowedHeaders != "" && origin != "" {
 		rw.Header().Set("Access-Control-Allow-Headers", allowedHeaders)
 	}
 
-	allowedCredentials := daemonConfig["core.https_allowed_credentials"].GetBool()
+	allowedCredentials := config.HTTPSAllowedCredentials()
 	if allowedCredentials {
 		rw.Header().Set("Access-Control-Allow-Credentials", "true")
 	}
-
-	// OPTIONS request don't need any further processing
-	if req.Method == "OPTIONS" {
-		return
-	}
-
-	// Call the original server
-	s.r.ServeHTTP(rw, req)
 }
diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index f257c396a..1a589b37c 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -30,6 +30,26 @@ func ConfigLoad(tx *db.ClusterTx) (*Config, error) {
 	return &Config{tx: tx, m: m}, nil
 }
 
+// HTTPSAllowedHeaders returns the relevant CORS setting.
+func (c *Config) HTTPSAllowedHeaders() string {
+	return c.m.GetString("core.https_allowed_headers")
+}
+
+// HTTPSAllowedMethods returns the relevant CORS setting.
+func (c *Config) HTTPSAllowedMethods() string {
+	return c.m.GetString("core.https_allowed_methods")
+}
+
+// HTTPSAllowedOrigin returns the relevant CORS setting.
+func (c *Config) HTTPSAllowedOrigin() string {
+	return c.m.GetString("core.https_allowed_origin")
+}
+
+// HTTPSAllowedCredentials returns the relevant CORS setting.
+func (c *Config) HTTPSAllowedCredentials() bool {
+	return c.m.GetBool("core.https_allowed_credentials")
+}
+
 // ProxyHTTP returns the configured HTTP proxy, if any.
 func (c *Config) ProxyHTTP() string {
 	return c.m.GetString("core.proxy_http")
@@ -74,7 +94,7 @@ var ConfigSchema = config.Schema{
 	"core.https_allowed_headers":     {},
 	"core.https_allowed_methods":     {},
 	"core.https_allowed_origin":      {},
-	"core.https_allowed_credentials": {},
+	"core.https_allowed_credentials": {Type: config.Bool},
 	"core.proxy_http":                {},
 	"core.proxy_https":               {},
 	"core.proxy_ignore_hosts":        {},
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index 6ada99393..9be4e87f5 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -181,15 +181,11 @@ func (k *daemonConfigKey) GetInt64() int64 {
 func daemonConfigInit(cluster *dbapi.Cluster) error {
 	// Set all the keys
 	daemonConfig = map[string]*daemonConfigKey{
-		"core.https_allowed_headers":     {valueType: "string"},
-		"core.https_allowed_methods":     {valueType: "string"},
-		"core.https_allowed_origin":      {valueType: "string"},
-		"core.https_allowed_credentials": {valueType: "bool"},
-		"core.proxy_http":                {valueType: "string", setter: daemonConfigSetProxy},
-		"core.proxy_https":               {valueType: "string", setter: daemonConfigSetProxy},
-		"core.proxy_ignore_hosts":        {valueType: "string", setter: daemonConfigSetProxy},
-		"core.trust_password":            {valueType: "string", hiddenValue: true, setter: daemonConfigSetPassword},
-		"core.macaroon.endpoint":         {valueType: "string", setter: daemonConfigSetMacaroonEndpoint},
+		"core.proxy_http":         {valueType: "string", setter: daemonConfigSetProxy},
+		"core.proxy_https":        {valueType: "string", setter: daemonConfigSetProxy},
+		"core.proxy_ignore_hosts": {valueType: "string", setter: daemonConfigSetProxy},
+		"core.trust_password":     {valueType: "string", hiddenValue: true, setter: daemonConfigSetPassword},
+		"core.macaroon.endpoint":  {valueType: "string", setter: daemonConfigSetMacaroonEndpoint},
 
 		"images.auto_update_cached":    {valueType: "bool", defaultValue: "true"},
 		"images.auto_update_interval":  {valueType: "int", defaultValue: "6", trigger: daemonConfigTriggerAutoUpdateInterval},

From 97f21c6139c8f4064280cab7a5fe758ca9896a70 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 19:53:36 +0000
Subject: [PATCH 037/116] Load deprecated storage config keys from database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/config.go | 25 +++++++++++++-------
 lxd/daemon_config.go  | 18 ---------------
 lxd/patches.go        | 63 ++++++++++++++++++++++++++++++++++++++-------------
 3 files changed, 64 insertions(+), 42 deletions(-)

diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index 1a589b37c..a1b15bbcc 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -103,12 +103,21 @@ var ConfigSchema = config.Schema{
 	"images.auto_update_interval":    {},
 	"images.compression_algorithm":   {},
 	"images.remote_cache_expiry":     {},
-	"storage.lvm_fstype":             {},
-	"storage.lvm_mount_options":      {},
-	"storage.lvm_thinpool_name":      {},
-	"storage.lvm_vg_name":            {},
-	"storage.lvm_volume_size":        {},
-	"storage.zfs_pool_name":          {},
-	"storage.zfs_remove_snapshots":   {},
-	"storage.zfs_use_refquota":       {},
+
+	// Keys deprecated since the implementation of the storage api.
+	"storage.lvm_fstype":           {Setter: deprecatedStorage, Default: "ext4"},
+	"storage.lvm_mount_options":    {Setter: deprecatedStorage, Default: "discard"},
+	"storage.lvm_thinpool_name":    {Setter: deprecatedStorage, Default: "LXDPool"},
+	"storage.lvm_vg_name":          {Setter: deprecatedStorage},
+	"storage.lvm_volume_size":      {Setter: deprecatedStorage, Default: "10GiB"},
+	"storage.zfs_pool_name":        {Setter: deprecatedStorage},
+	"storage.zfs_remove_snapshots": {Setter: deprecatedStorage, Type: config.Bool},
+	"storage.zfs_use_refquota":     {Setter: deprecatedStorage, Type: config.Bool},
+}
+
+func deprecatedStorage(value string) (string, error) {
+	if value == "" {
+		return "", nil
+	}
+	return "", fmt.Errorf("deprecated: use storage pool configuration")
 }
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index 9be4e87f5..e0f5a1c4d 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -191,16 +191,6 @@ func daemonConfigInit(cluster *dbapi.Cluster) error {
 		"images.auto_update_interval":  {valueType: "int", defaultValue: "6", trigger: daemonConfigTriggerAutoUpdateInterval},
 		"images.compression_algorithm": {valueType: "string", validator: daemonConfigValidateCompression, defaultValue: "gzip"},
 		"images.remote_cache_expiry":   {valueType: "int", defaultValue: "10", trigger: daemonConfigTriggerExpiry},
-
-		// Keys deprecated since the implementation of the storage api.
-		"storage.lvm_fstype":           {valueType: "string", defaultValue: "ext4", validValues: []string{"btrfs", "ext4", "xfs"}, validator: storageDeprecatedKeys},
-		"storage.lvm_mount_options":    {valueType: "string", defaultValue: "discard", validator: storageDeprecatedKeys},
-		"storage.lvm_thinpool_name":    {valueType: "string", defaultValue: "LXDPool", validator: storageDeprecatedKeys},
-		"storage.lvm_vg_name":          {valueType: "string", validator: storageDeprecatedKeys},
-		"storage.lvm_volume_size":      {valueType: "string", defaultValue: "10GiB", validator: storageDeprecatedKeys},
-		"storage.zfs_pool_name":        {valueType: "string", validator: storageDeprecatedKeys},
-		"storage.zfs_remove_snapshots": {valueType: "bool", validator: storageDeprecatedKeys},
-		"storage.zfs_use_refquota":     {valueType: "bool", validator: storageDeprecatedKeys},
 	}
 
 	// Load the values from the DB
@@ -344,11 +334,3 @@ func daemonConfigValidateCompression(d *Daemon, key string, value string) error
 	_, err := exec.LookPath(value)
 	return err
 }
-
-func storageDeprecatedKeys(d *Daemon, key string, value string) error {
-	if value == "" || daemonConfig[key].defaultValue == value {
-		return nil
-	}
-
-	return fmt.Errorf("Setting the key \"%s\" is deprecated in favor of storage pool configuration.", key)
-}
diff --git a/lxd/patches.go b/lxd/patches.go
index 065f193a5..f471cd70e 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -8,6 +8,7 @@ import (
 	"strings"
 	"syscall"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/logger"
@@ -163,8 +164,18 @@ func patchNetworkPermissions(name string, d *Daemon) error {
 }
 
 func patchStorageApi(name string, d *Daemon) error {
-	lvmVgName := daemonConfig["storage.lvm_vg_name"].Get()
-	zfsPoolName := daemonConfig["storage.zfs_pool_name"].Get()
+	var daemonConfig map[string]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		daemonConfig, err = tx.Config()
+		return err
+	})
+	if err != nil {
+		return err
+	}
+
+	lvmVgName := daemonConfig["storage.lvm_vg_name"]
+	zfsPoolName := daemonConfig["storage.zfs_pool_name"]
 	defaultPoolName := "default"
 	preStorageApiStorageType := storageTypeDir
 
@@ -268,14 +279,25 @@ func patchStorageApi(name string, d *Daemon) error {
 	}
 
 	// Unset deprecated storage keys.
-	daemonConfig["storage.lvm_fstype"].Set(d, "")
-	daemonConfig["storage.lvm_mount_options"].Set(d, "")
-	daemonConfig["storage.lvm_thinpool_name"].Set(d, "")
-	daemonConfig["storage.lvm_vg_name"].Set(d, "")
-	daemonConfig["storage.lvm_volume_size"].Set(d, "")
-	daemonConfig["storage.zfs_pool_name"].Set(d, "")
-	daemonConfig["storage.zfs_remove_snapshots"].Set(d, "")
-	daemonConfig["storage.zfs_use_refquota"].Set(d, "")
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		return config.Patch(map[string]interface{}{
+			"storage.lvm_fstype":           "",
+			"storage.lvm_mount_options":    "",
+			"storage.lvm_thinpool_name":    "",
+			"storage.lvm_vg_name":          "",
+			"storage.lvm_volume_size":      "",
+			"storage.zfs_pool_name":        "",
+			"storage.zfs_remove_snapshots": "",
+			"storage.zfs_use_refquota":     "",
+		})
+	})
+	if err != nil {
+		return err
+	}
 
 	return SetupStorageDriver(d.State(), true)
 }
@@ -831,26 +853,35 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 	poolConfig["source"] = defaultPoolName
 
 	// Set it only if it is not the default value.
-	fsType := daemonConfig["storage.lvm_fstype"].Get()
+	var daemonConfig map[string]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		daemonConfig, err = tx.Config()
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	fsType := daemonConfig["storage.lvm_fstype"]
 	if fsType != "" && fsType != "ext4" {
 		poolConfig["volume.block.filesystem"] = fsType
 	}
 
 	// Set it only if it is not the default value.
-	fsMntOpts := daemonConfig["storage.lvm_mount_options"].Get()
+	fsMntOpts := daemonConfig["storage.lvm_mount_options"]
 	if fsMntOpts != "" && fsMntOpts != "discard" {
 		poolConfig["volume.block.mount_options"] = fsMntOpts
 	}
 
-	poolConfig["lvm.thinpool_name"] = daemonConfig["storage.lvm_thinpool_name"].Get()
+	poolConfig["lvm.thinpool_name"] = daemonConfig["storage.lvm_thinpool_name"]
 	if poolConfig["lvm.thinpool_name"] == "" {
 		// If empty we need to set it to the old default.
 		poolConfig["lvm.thinpool_name"] = "LXDPool"
 	}
 
-	poolConfig["lvm.vg_name"] = daemonConfig["storage.lvm_vg_name"].Get()
+	poolConfig["lvm.vg_name"] = daemonConfig["storage.lvm_vg_name"]
 
-	poolConfig["volume.size"] = daemonConfig["storage.lvm_volume_size"].Get()
+	poolConfig["volume.size"] = daemonConfig["storage.lvm_volume_size"]
 	if poolConfig["volume.size"] != "" {
 		// In case stuff like GiB is used which
 		// share.dParseByteSizeString() doesn't handle.
@@ -862,7 +893,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 	// "volume.size", so unset it.
 	poolConfig["size"] = ""
 
-	err := storagePoolValidateConfig(defaultPoolName, defaultStorageTypeName, poolConfig, nil)
+	err = storagePoolValidateConfig(defaultPoolName, defaultStorageTypeName, poolConfig, nil)
 	if err != nil {
 		return err
 	}

From ee355f0ad68b42b548f7001accdd869551b8b872 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 17 Oct 2017 20:03:50 +0000
Subject: [PATCH 038/116] Load daemon config from database when rendering REST
 responses

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/config.go | 11 ++++++-----
 lxd/daemon_config.go  | 27 +++++++++++++--------------
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index a1b15bbcc..a716f1448 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -98,11 +98,12 @@ var ConfigSchema = config.Schema{
 	"core.proxy_http":                {},
 	"core.proxy_https":               {},
 	"core.proxy_ignore_hosts":        {},
-	"core.trust_password":            {},
-	"images.auto_update_cached":      {},
-	"images.auto_update_interval":    {},
-	"images.compression_algorithm":   {},
-	"images.remote_cache_expiry":     {},
+	"core.trust_password":            {Hidden: true},
+	"core.macaroon.endpoint":         {},
+	"images.auto_update_cached":      {Type: config.Bool, Default: "true"},
+	"images.auto_update_interval":    {Type: config.Int64, Default: "6"},
+	"images.compression_algorithm":   {Default: "gzip"},
+	"images.remote_cache_expiry":     {Type: config.Int64, Default: "10"},
 
 	// Keys deprecated since the implementation of the storage api.
 	"storage.lvm_fstype":           {Setter: deprecatedStorage, Default: "ext4"},
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index e0f5a1c4d..b049f1589 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -13,6 +13,7 @@ import (
 	log "github.com/lxc/lxd/shared/log15"
 	"golang.org/x/crypto/scrypt"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	dbapi "github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
@@ -223,28 +224,26 @@ func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 	config := map[string]interface{}{}
 
 	// Turn the config into a JSON-compatible map
-	for k, v := range daemonConfig {
-		value := v.Get()
-		if value != v.defaultValue {
-			if v.hiddenValue {
-				config[k] = true
-			} else {
-				config[k] = value
-			}
+	err := state.Cluster.Transaction(func(tx *dbapi.ClusterTx) error {
+		clusterConfig, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		for key, value := range clusterConfig.Dump() {
+			config[key] = value
 		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
 	}
 
-	err := state.Node.Transaction(func(tx *dbapi.NodeTx) error {
+	err = state.Node.Transaction(func(tx *dbapi.NodeTx) error {
 		nodeConfig, err := node.ConfigLoad(tx)
 		if err != nil {
 			return err
 		}
 		for key, value := range nodeConfig.Dump() {
-			// FIXME: we can drop this conditional as soon as we
-			//        migrate all non-node-local keys to the cluster db
-			if key != "core.https_address" {
-				continue
-			}
 			config[key] = value
 		}
 		return nil

From cc81472e7122e5c4490308932eeba360e492bdcd Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 18 Oct 2017 14:53:46 +0000
Subject: [PATCH 039/116] Retry failed cluster transactions

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/db.go          | 24 ++++++++++++++++++++----
 lxd/main_init_test.go |  2 ++
 2 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/lxd/db/db.go b/lxd/db/db.go
index 356727be3..420257762 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -3,6 +3,7 @@ package db
 import (
 	"database/sql"
 	"fmt"
+	"strings"
 	"time"
 
 	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
@@ -182,10 +183,25 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 // database, otherwise they are rolled back.
 func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 	clusterTx := &ClusterTx{}
-	return query.Transaction(c.db, func(tx *sql.Tx) error {
-		clusterTx.tx = tx
-		return f(clusterTx)
-	})
+
+	// FIXME: the retry count (10) and delay (1s) should be configurable.
+	var err error
+	for i := 0; i < 10; i++ {
+		err = query.Transaction(c.db, func(tx *sql.Tx) error {
+			clusterTx.tx = tx
+			return f(clusterTx)
+		})
+		if err != nil {
+			// FIXME: matching the error string is fragile; we should
+			//        check for driver.ErrBadConn instead.
+			if strings.Contains(err.Error(), "bad connection") {
+				time.Sleep(time.Second)
+				continue
+			}
+		}
+		break
+	}
+	return err
 }
 
 // Close the database facade.
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 8b3a2ad9a..8ea05500f 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -14,6 +14,7 @@ import (
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/cmd"
+	"github.com/lxc/lxd/shared/logging"
 	"github.com/stretchr/testify/suite"
 )
 
@@ -27,6 +28,7 @@ type cmdInitTestSuite struct {
 }
 
 func (suite *cmdInitTestSuite) SetupTest() {
+	logging.Testing(suite.T())
 	suite.lxdTestSuite.SetupTest()
 	suite.streams = cmd.NewMemoryStreams("")
 	suite.context = cmd.NewMemoryContext(suite.streams)

From c42fd4d9a39067a259edc7d306962765a6a5c8df Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 19 Oct 2017 11:10:47 +0000
Subject: [PATCH 040/116] Add helpers to update the heartbeat column of a nodes
 row in the db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node.go      | 46 +++++++++++++++++++++++++++++++++++++++-------
 lxd/db/node_test.go | 21 +++++++++++++++++++++
 2 files changed, 60 insertions(+), 7 deletions(-)

diff --git a/lxd/db/node.go b/lxd/db/node.go
index ca02779c1..96fd70bf7 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -1,6 +1,9 @@
 package db
 
 import (
+	"fmt"
+	"time"
+
 	"github.com/lxc/lxd/lxd/db/cluster"
 	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/version"
@@ -9,12 +12,19 @@ import (
 
 // NodeInfo holds information about a single LXD instance in a cluster.
 type NodeInfo struct {
-	ID            int64  // Stable node identifier
-	Name          string // User-assigned name of the node
-	Address       string // Network address of the node
-	Description   string // Node description (optional)
-	Schema        int    // Schema version of the LXD code running the node
-	APIExtensions int    // Number of API extensions of the LXD code running on the node
+	ID            int64     // Stable node identifier
+	Name          string    // User-assigned name of the node
+	Address       string    // Network address of the node
+	Description   string    // Node description (optional)
+	Schema        int       // Schema version of the LXD code running the node
+	APIExtensions int       // Number of API extensions of the LXD code running on the node
+	Heartbeat     time.Time // Timestamp of the last heartbeat
+}
+
+// IsDown returns true if the node's last heartbeat is older than 20 seconds,
+// i.e. the node has missed its heartbeat window and is presumed offline.
+func (n NodeInfo) IsDown() bool {
+	return n.Heartbeat.Before(time.Now().Add(-20 * time.Second))
+}
 
 // Nodes returns all LXD nodes part of the cluster.
@@ -31,9 +41,14 @@ func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
 			&nodes[i].Description,
 			&nodes[i].Schema,
 			&nodes[i].APIExtensions,
+			&nodes[i].Heartbeat,
 		}
 	}
-	stmt := "SELECT id, name, address, description, schema, api_extensions FROM nodes ORDER BY id"
+	stmt := `
+SELECT id, name, address, description, schema, api_extensions, heartbeat
+  FROM nodes
+    ORDER BY id
+`
 	err := query.SelectObjects(c.tx, dest, stmt)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to fecth nodes")
@@ -48,3 +63,20 @@ func (c *ClusterTx) NodeAdd(name string, address string) (int64, error) {
 	values := []interface{}{name, address, cluster.SchemaVersion, len(version.APIExtensions)}
 	return query.UpsertObject(c.tx, "nodes", columns, values)
 }
+
+// NodeHeartbeat updates the heartbeat timestamp of the node with the given
+// address, returning an error if the address does not match exactly one row.
+func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
+	stmt := "UPDATE nodes SET heartbeat=? WHERE address=?"
+	result, err := c.tx.Exec(stmt, heartbeat, address)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	// Exactly one row must have been updated, otherwise the given address
+	// does not identify a known node.
+	if n != 1 {
+		return fmt.Errorf("expected to update one row and not %d", n)
+	}
+	return nil
+}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 82d3af111..2dbdf0efc 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -2,6 +2,7 @@ package db_test
 
 import (
 	"testing"
+	"time"
 
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/cluster"
@@ -28,4 +29,24 @@ func TestNodeAdd(t *testing.T) {
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)
 	assert.Equal(t, len(version.APIExtensions), node.APIExtensions)
+	assert.False(t, node.IsDown())
+}
+
+// Updating a node's heartbeat to more than 20 seconds in the past makes the
+// node be reported as down by IsDown.
+func TestNodeHeartbeat(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	err = tx.NodeHeartbeat("1.2.3.4:666", time.Now().Add(-time.Minute))
+	require.NoError(t, err)
+
+	nodes, err := tx.Nodes()
+	require.NoError(t, err)
+	require.Len(t, nodes, 1)
+
+	node := nodes[0]
+	assert.True(t, node.IsDown())
+}

From 338ff167a1533cb3239440295423a1b518759bec Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 19 Oct 2017 11:11:58 +0000
Subject: [PATCH 041/116] Add cluster.Notifier to run client interactions
 against peer nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/notify.go      | 128 ++++++++++++++++++++++++++++++++
 lxd/cluster/notify_test.go | 180 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 308 insertions(+)
 create mode 100644 lxd/cluster/notify.go
 create mode 100644 lxd/cluster/notify_test.go

diff --git a/lxd/cluster/notify.go b/lxd/cluster/notify.go
new file mode 100644
index 000000000..860692111
--- /dev/null
+++ b/lxd/cluster/notify.go
@@ -0,0 +1,128 @@
+package cluster
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/pkg/errors"
+)
+
+// Notifier is a function that invokes the given function against each node in
+// the cluster excluding the invoking one.
+type Notifier func(hook func(lxd.ContainerServer) error) error
+
+// NotifierPolicy can be used to tweak the behavior of NewNotifier in case
+// some of the nodes are down.
+type NotifierPolicy int
+
+// Possible notification policies.
+const (
+	NotifyAll   NotifierPolicy = iota // Requires that all nodes are up.
+	NotifyAlive                       // Only notifies nodes that are alive
+)
+
+// NewNotifier builds a Notifier that can be used to notify other peers using
+// the given policy.
+func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolicy) (Notifier, error) {
+	address, err := node.HTTPSAddress(state.Node)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fetch node address")
+	}
+
+	// Fast-track the case where we're not networked at all.
+	if address == "" {
+		nullNotifier := func(func(lxd.ContainerServer) error) error { return nil }
+		return nullNotifier, nil
+	}
+
+	peers := []string{}
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		if err != nil {
+			return err
+		}
+		for _, node := range nodes {
+			if node.Address == address {
+				continue // Exclude ourselves
+			}
+			if node.IsDown() {
+				switch policy {
+				case NotifyAll:
+					return fmt.Errorf("peer node %s is down", node.Address)
+				case NotifyAlive:
+					continue // Just skip this node
+				}
+			}
+			peers = append(peers, node.Address)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Client parameters to connect to a peer cluster node.
+	args := &lxd.ConnectionArgs{
+		TLSServerCert: string(cert.PublicKey()),
+		TLSClientCert: string(cert.PublicKey()),
+		TLSClientKey:  string(cert.PrivateKey()),
+		// Use a special user agent to let the API handlers know they
+		// should not do any database work.
+		UserAgent: "lxd-cluster-notifier",
+	}
+	if cert.CA() != nil {
+		args.TLSCA = string(cert.CA().Raw)
+	}
+
+	notifier := func(hook func(lxd.ContainerServer) error) error {
+		// Fan out to all peers concurrently: each goroutine writes only
+		// its own errs slot, so no extra synchronization is needed.
+		errs := make([]error, len(peers))
+		wg := sync.WaitGroup{}
+		wg.Add(len(peers))
+		for i, address := range peers {
+			logger.Debugf("Notify node %s of state changes", address)
+			go func(i int, address string) {
+				defer wg.Done()
+				client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", address), args)
+				if err != nil {
+					errs[i] = errors.Wrapf(err, "failed to connect to peer %s", address)
+					return
+				}
+				err = hook(client)
+				if err != nil {
+					errs[i] = errors.Wrapf(err, "failed to notify peer %s", address)
+				}
+			}(i, address)
+		}
+		wg.Wait()
+		// TODO: aggregate all errors?
+		for i, err := range errs {
+			if err != nil {
+				// FIXME: unfortunately the LXD client currently does not
+				//        provide a way to differentiate between errors
+				if isClientConnectionError(err) && policy == NotifyAlive {
+					logger.Warnf("Could not notify node %s", peers[i])
+					continue
+				}
+				return err
+			}
+		}
+		return nil
+	}
+
+	return notifier, nil
+}
+
+// Return true if the given error is due to the LXD Go client not being able to
+// connect to the target LXD node.
+func isClientConnectionError(err error) bool {
+	// FIXME: string matching is fragile, but the LXD client does not
+	//        currently expose typed connection errors.
+	return strings.Contains(err.Error(), "Unable to connect to")
+}
diff --git a/lxd/cluster/notify_test.go b/lxd/cluster/notify_test.go
new file mode 100644
index 000000000..409d04d8b
--- /dev/null
+++ b/lxd/cluster/notify_test.go
@@ -0,0 +1,180 @@
+package cluster_test
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"testing"
+	"time"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/node"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// The returned notifier invokes the hook against every node in the cluster
+// except the one that created it.
+func TestNewNotifier(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	f := notifyFixtures{t: t, state: state}
+	defer f.Nodes(cert, 3)()
+
+	notifier, err := cluster.NewNotifier(state, cert, cluster.NotifyAll)
+	require.NoError(t, err)
+
+	i := 0
+	hook := func(client lxd.ContainerServer) error {
+		server, _, err := client.GetServer()
+		require.NoError(t, err)
+		assert.Equal(t, f.Address(i+1), server.Config["core.https_address"])
+		i++
+		return nil
+	}
+	assert.NoError(t, notifier(hook))
+	assert.Equal(t, 2, i)
+}
+
+// Creating a new notifier fails if the policy is set to NotifyAll and one of
+// the nodes is down; no notifier is returned in that case.
+func TestNewNotify_NotifyAllError(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	f := notifyFixtures{t: t, state: state}
+	defer f.Nodes(cert, 3)()
+
+	f.Down(1)
+	notifier, err := cluster.NewNotifier(state, cert, cluster.NotifyAll)
+	assert.Nil(t, notifier)
+	require.Error(t, err)
+	assert.Regexp(t, "peer node .+ is down", err.Error())
+}
+
+// Creating a new notifier does not fail if the policy is set to NotifyAlive
+// and one of the nodes is down: the dead node is skipped, and only the
+// remaining alive peer is notified.
+func TestNewNotify_NotifyAlive(t *testing.T) {
+	state, cleanup := state.NewTestState(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+
+	f := notifyFixtures{t: t, state: state}
+	defer f.Nodes(cert, 3)()
+
+	f.Down(1)
+	notifier, err := cluster.NewNotifier(state, cert, cluster.NotifyAlive)
+	assert.NoError(t, err)
+
+	i := 0
+	hook := func(client lxd.ContainerServer) error {
+		i++
+		return nil
+	}
+	assert.NoError(t, notifier(hook))
+	assert.Equal(t, 1, i)
+}
+
+// Helper for setting up fixtures for Notify tests: it spawns fake nodes and
+// records them in the test state's cluster database.
+type notifyFixtures struct {
+	t     *testing.T
+	state *state.State
+}
+
+// Spawn the given number of fake nodes, save them in the database and
+// return a cleanup function that stops the test servers.
+//
+// The address of the first node spawned will be saved as local
+// core.https_address.
+func (h *notifyFixtures) Nodes(cert *shared.CertInfo, n int) func() {
+	servers := make([]*httptest.Server, n)
+	for i := 0; i < n; i++ {
+		servers[i] = newRestServer(cert)
+	}
+
+	// Insert new entries in the nodes table of the cluster database.
+	err := h.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		for i := 0; i < n; i++ {
+			name := strconv.Itoa(i)
+			address := servers[i].Listener.Addr().String()
+			_, err := tx.NodeAdd(name, address)
+			require.NoError(h.t, err)
+		}
+		return nil
+	})
+	require.NoError(h.t, err)
+
+	// Set the address in the config table of the node database.
+	err = h.state.Node.Transaction(func(tx *db.NodeTx) error {
+		config, err := node.ConfigLoad(tx)
+		require.NoError(h.t, err)
+		address := servers[0].Listener.Addr().String()
+		values := map[string]interface{}{"core.https_address": address}
+		require.NoError(h.t, config.Patch(values))
+		return nil
+	})
+	require.NoError(h.t, err)
+
+	cleanup := func() {
+		for _, server := range servers {
+			server.Close()
+		}
+	}
+
+	return cleanup
+}
+
+// Return the network address of the i-th node, as recorded in the cluster database.
+func (h *notifyFixtures) Address(i int) string {
+	var address string
+	err := h.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(h.t, err)
+		address = nodes[i].Address
+		return nil
+	})
+	require.NoError(h.t, err)
+	return address
+}
+
+// Mark the i-th node as down by back-dating its heartbeat by one minute.
+func (h *notifyFixtures) Down(i int) {
+	err := h.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(h.t, err)
+		err = tx.NodeHeartbeat(nodes[i].Address, time.Now().Add(-time.Minute))
+		require.NoError(h.t, err)
+		return nil
+	})
+	require.NoError(h.t, err)
+}
+
+// Returns a minimal stub of the LXD RESTful API server: it serves only the
+// /1.0 endpoint, reporting its own listen address as core.https_address,
+// which is just enough for lxd.ConnectLXD and GetServer to succeed.
+func newRestServer(cert *shared.CertInfo) *httptest.Server {
+	mux := http.NewServeMux()
+
+	server := httptest.NewUnstartedServer(mux)
+	server.TLS = util.ServerTLSConfig(cert)
+	server.StartTLS()
+
+	mux.HandleFunc("/1.0/", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		config := map[string]interface{}{"core.https_address": server.Listener.Addr().String()}
+		metadata := api.ServerPut{Config: config}
+		util.WriteJSON(w, api.ResponseRaw{Metadata: metadata}, false)
+	})
+
+	return server
+}

From bbebd167adf701033a0ad5df7e0f13a1020c8e32 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 19 Oct 2017 13:39:04 +0000
Subject: [PATCH 042/116] Make /1.0 PUT/PATCH API handlers update the cluster
 database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go             | 116 ++++++++++++++++-----------------------
 lxd/cluster/config.go      |  71 +++++++++++++++++++++---
 lxd/cluster/config_test.go |   9 +--
 lxd/daemon_config.go       | 133 +++------------------------------------------
 lxd/daemon_test.go         |  43 ---------------
 lxd/main_init_test.go      |  93 +++++++++++++++++++++++++------
 lxd/node/config.go         |   2 +-
 lxd/patches.go             |   3 +-
 8 files changed, 201 insertions(+), 269 deletions(-)
 delete mode 100644 lxd/daemon_test.go

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index dcb357ea1..b63524e5c 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -1,13 +1,12 @@
 package main
 
 import (
-	"fmt"
 	"net/http"
 	"os"
-	"reflect"
 
 	"gopkg.in/lxc/go-lxc.v2"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/config"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
@@ -164,16 +163,6 @@ func api10Get(d *Daemon, r *http.Request) Response {
 }
 
 func api10Put(d *Daemon, r *http.Request) Response {
-	var oldConfig map[string]string
-	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
-		var err error
-		oldConfig, err = tx.Config()
-		return err
-	})
-	if err != nil {
-		return SmartError(err)
-	}
-
 	render, err := daemonConfigRender(d.State())
 	if err != nil {
 		return InternalError(err)
@@ -188,20 +177,10 @@ func api10Put(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
-	return doApi10Update(d, oldConfig, req)
+	return doApi10Update(d, req, false)
 }
 
 func api10Patch(d *Daemon, r *http.Request) Response {
-	var oldConfig map[string]string
-	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
-		var err error
-		oldConfig, err = tx.Config()
-		return err
-	})
-	if err != nil {
-		return SmartError(err)
-	}
-
 	render, err := daemonConfigRender(d.State())
 	if err != nil {
 		return InternalError(err)
@@ -220,17 +199,10 @@ func api10Patch(d *Daemon, r *http.Request) Response {
 		return EmptySyncResponse
 	}
 
-	for k, v := range oldConfig {
-		_, ok := req.Config[k]
-		if !ok {
-			req.Config[k] = v
-		}
-	}
-
-	return doApi10Update(d, oldConfig, req)
+	return doApi10Update(d, req, true)
 }
 
-func doApi10Update(d *Daemon, oldConfig map[string]string, req api.ServerPut) Response {
+func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 	// The HTTPS address is the only config key that we want to save in the
 	// node-level database, so handle it here.
 	nodeValues := map[string]interface{}{}
@@ -254,51 +226,55 @@ func doApi10Update(d *Daemon, oldConfig map[string]string, req api.ServerPut) Re
 		return err
 	})
 	if err != nil {
-		return InternalError(err)
-	}
-
-	// Deal with special keys
-	for k, v := range req.Config {
-		config := daemonConfig[k]
-		if config != nil && config.hiddenValue && v == true {
-			req.Config[k] = oldConfig[k]
-		}
-	}
-
-	// Diff the configs
-	changedConfig := map[string]interface{}{}
-	for key, value := range oldConfig {
-		if req.Config[key] != value {
-			changedConfig[key] = req.Config[key]
-		}
-	}
-
-	for key, value := range req.Config {
-		if oldConfig[key] != value {
-			changedConfig[key] = req.Config[key]
+		switch err.(type) {
+		case config.ErrorList:
+			return BadRequest(err)
+		default:
+			return SmartError(err)
 		}
 	}
 
-	for key, valueRaw := range changedConfig {
-		if valueRaw == nil {
-			valueRaw = ""
+	var changed map[string]string
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return errors.Wrap(err, "failed to load cluster config")
 		}
-
-		s := reflect.ValueOf(valueRaw)
-		if !s.IsValid() || s.Kind() != reflect.String {
-			return BadRequest(fmt.Errorf("Invalid value type for '%s'", key))
+		if patch {
+			changed, err = config.Patch(req.Config)
+		} else {
+			changed, err = config.Replace(req.Config)
 		}
-
-		value := valueRaw.(string)
-
-		confKey, ok := daemonConfig[key]
-		if !ok {
-			return BadRequest(fmt.Errorf("Bad server config key: '%s'", key))
+		return err
+	})
+	if err != nil {
+		switch err.(type) {
+		case config.ErrorList:
+			return BadRequest(err)
+		default:
+			return SmartError(err)
 		}
+	}
 
-		err := confKey.Set(d, value)
-		if err != nil {
-			return SmartError(err)
+	daemonConfigInit(d.cluster)
+
+	for key, value := range changed {
+		switch key {
+		case "core.proxy_http":
+			fallthrough
+		case "core.proxy_https":
+			fallthrough
+		case "core.proxy_ignore_hosts":
+			daemonConfigSetProxy(d, changed)
+		case "core.macaroon.endpoint":
+			err := d.setupExternalAuthentication(value)
+			if err != nil {
+				return SmartError(err)
+			}
+		case "images.auto_update_interval":
+			d.taskAutoUpdate.Reset()
+		case "images.remote_cache_expiry":
+			d.taskPruneImages.Reset()
 		}
 	}
 
diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index a716f1448..7cb8634f2 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -1,7 +1,14 @@
 package cluster
 
 import (
+	"crypto/rand"
+	"encoding/hex"
 	"fmt"
+	"io"
+	"os/exec"
+	"time"
+
+	"golang.org/x/crypto/scrypt"
 
 	"github.com/lxc/lxd/lxd/config"
 	"github.com/lxc/lxd/lxd/db"
@@ -50,6 +57,17 @@ func (c *Config) HTTPSAllowedCredentials() bool {
 	return c.m.GetBool("core.https_allowed_credentials")
 }
 
+// TrustPassword returns the LXD trust password for authenticating clients.
+func (c *Config) TrustPassword() string {
+	return c.m.GetString("core.trust_password")
+}
+
+// AutoUpdateInterval returns the configured images auto update interval. The
+// value is stored in the database as a number of hours.
+func (c *Config) AutoUpdateInterval() time.Duration {
+	n := c.m.GetInt64("images.auto_update_interval")
+	return time.Duration(n) * time.Hour
+}
+
 // ProxyHTTP returns the configured HTTP proxy, if any.
 func (c *Config) ProxyHTTP() string {
 	return c.m.GetString("core.proxy_http")
@@ -62,12 +80,16 @@ func (c *Config) Dump() map[string]interface{} {
 }
 
 // Replace the current configuration with the given values.
-func (c *Config) Replace(values map[string]interface{}) error {
+//
+// Return the subset of keys whose values have actually changed.
+func (c *Config) Replace(values map[string]interface{}) (map[string]string, error) {
 	return c.update(values)
 }
 
 // Patch changes only the configuration keys in the given map.
-func (c *Config) Patch(patch map[string]interface{}) error {
+//
+// Return what has actually changed.
+func (c *Config) Patch(patch map[string]interface{}) (map[string]string, error) {
 	values := c.Dump() // Use current values as defaults
 	for name, value := range patch {
 		values[name] = value
@@ -75,18 +97,18 @@ func (c *Config) Patch(patch map[string]interface{}) error {
 	return c.update(values)
 }
 
-func (c *Config) update(values map[string]interface{}) error {
+// update applies the given values to the in-memory config map and persists
+// the changed keys to the database, returning what actually changed.
+func (c *Config) update(values map[string]interface{}) (map[string]string, error) {
 	changed, err := c.m.Change(values)
 	if err != nil {
-		return fmt.Errorf("invalid configuration changes: %s", err)
+		return nil, err
 	}
 
 	err = c.tx.UpdateConfig(changed)
 	if err != nil {
-		return fmt.Errorf("cannot persist confiuration changes: %v", err)
+		return nil, fmt.Errorf("cannot persist configuration changes: %v", err)
 	}
 
-	return nil
+	return changed, nil
 }
 
 // ConfigSchema defines available server configuration keys.
@@ -98,11 +120,11 @@ var ConfigSchema = config.Schema{
 	"core.proxy_http":                {},
 	"core.proxy_https":               {},
 	"core.proxy_ignore_hosts":        {},
-	"core.trust_password":            {Hidden: true},
+	"core.trust_password":            {Hidden: true, Setter: passwordSetter},
 	"core.macaroon.endpoint":         {},
 	"images.auto_update_cached":      {Type: config.Bool, Default: "true"},
 	"images.auto_update_interval":    {Type: config.Int64, Default: "6"},
-	"images.compression_algorithm":   {Default: "gzip"},
+	"images.compression_algorithm":   {Default: "gzip", Validator: validateCompression},
 	"images.remote_cache_expiry":     {Type: config.Int64, Default: "10"},
 
 	// Keys deprecated since the implementation of the storage api.
@@ -116,6 +138,39 @@ var ConfigSchema = config.Schema{
 	"storage.zfs_use_refquota":     {Setter: deprecatedStorage, Type: config.Bool},
 }
 
+// passwordSetter hashes a new trust password before it is stored in the
+// database. An empty value is passed through unchanged, so the password can
+// be unset.
+func passwordSetter(value string) (string, error) {
+	// Nothing to do on unset
+	if value == "" {
+		return value, nil
+	}
+
+	// Generate a random 32-byte salt.
+	buf := make([]byte, 32)
+	_, err := io.ReadFull(rand.Reader, buf)
+	if err != nil {
+		return "", err
+	}
+
+	// Derive a 64-byte scrypt key from the password (N=1<<14, r=8, p=1).
+	hash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64)
+	if err != nil {
+		return "", err
+	}
+
+	// Store the salt followed by the hash, hex-encoded.
+	buf = append(buf, hash...)
+	value = hex.EncodeToString(buf)
+
+	return value, nil
+}
+
+// validateCompression checks that the configured image compression algorithm
+// is either "none" or the name of a binary available in $PATH.
+func validateCompression(value string) error {
+	if value == "none" {
+		return nil
+	}
+
+	_, err := exec.LookPath(value)
+	return err
+}
+
 func deprecatedStorage(value string) (string, error) {
 	if value == "" {
 		return "", nil
diff --git a/lxd/cluster/config_test.go b/lxd/cluster/config_test.go
index 0da3979d8..ee67ac3d1 100644
--- a/lxd/cluster/config_test.go
+++ b/lxd/cluster/config_test.go
@@ -58,10 +58,11 @@ func TestConfig_ReplaceDeleteValues(t *testing.T) {
 	config, err := cluster.ConfigLoad(tx)
 	require.NoError(t, err)
 
-	err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
+	changed, err := config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
 	assert.NoError(t, err)
+	assert.Equal(t, map[string]string{"core.proxy_http": "foo.bar"}, changed)
 
-	err = config.Replace(map[string]interface{}{})
+	_, err = config.Replace(map[string]interface{}{})
 	assert.NoError(t, err)
 
 	assert.Equal(t, "", config.ProxyHTTP())
@@ -80,10 +81,10 @@ func TestConfig_PatchKeepsValues(t *testing.T) {
 	config, err := cluster.ConfigLoad(tx)
 	require.NoError(t, err)
 
-	err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
+	_, err = config.Replace(map[string]interface{}{"core.proxy_http": "foo.bar"})
 	assert.NoError(t, err)
 
-	err = config.Patch(map[string]interface{}{})
+	_, err = config.Patch(map[string]interface{}{})
 	assert.NoError(t, err)
 
 	assert.Equal(t, "foo.bar", config.ProxyHTTP())
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index b049f1589..3417d8edd 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -1,17 +1,12 @@
 package main
 
 import (
-	"crypto/rand"
-	"encoding/hex"
 	"fmt"
-	"io"
-	"os/exec"
 	"strconv"
 	"strings"
 	"sync"
 
 	log "github.com/lxc/lxd/shared/log15"
-	"golang.org/x/crypto/scrypt"
 
 	"github.com/lxc/lxd/lxd/cluster"
 	dbapi "github.com/lxc/lxd/lxd/db"
@@ -96,53 +91,6 @@ func (k *daemonConfigKey) Validate(d *Daemon, value string) error {
 	return nil
 }
 
-func (k *daemonConfigKey) Set(d *Daemon, value string) error {
-	var name string
-
-	// Check if we are actually changing things
-	oldValue := k.currentValue
-	if oldValue == value {
-		return nil
-	}
-
-	// Validate the new value
-	err := k.Validate(d, value)
-	if err != nil {
-		return err
-	}
-
-	// Run external setting function
-	if k.setter != nil {
-		value, err = k.setter(d, k.name(), value)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Get the configuration key and make sure daemonConfig is sane
-	name = k.name()
-	if name == "" {
-		return fmt.Errorf("Corrupted configuration cache")
-	}
-
-	// Actually apply the change
-	daemonConfigLock.Lock()
-	k.currentValue = value
-	daemonConfigLock.Unlock()
-
-	err = dbapi.ConfigValueSet(d.cluster, name, value)
-	if err != nil {
-		return err
-	}
-
-	// Run the trigger (if any)
-	if k.trigger != nil {
-		k.trigger(d, k.name(), value)
-	}
-
-	return nil
-}
-
 func (k *daemonConfigKey) Get() string {
 	value := k.currentValue
 
@@ -182,16 +130,16 @@ func (k *daemonConfigKey) GetInt64() int64 {
 func daemonConfigInit(cluster *dbapi.Cluster) error {
 	// Set all the keys
 	daemonConfig = map[string]*daemonConfigKey{
-		"core.proxy_http":         {valueType: "string", setter: daemonConfigSetProxy},
-		"core.proxy_https":        {valueType: "string", setter: daemonConfigSetProxy},
-		"core.proxy_ignore_hosts": {valueType: "string", setter: daemonConfigSetProxy},
-		"core.trust_password":     {valueType: "string", hiddenValue: true, setter: daemonConfigSetPassword},
-		"core.macaroon.endpoint":  {valueType: "string", setter: daemonConfigSetMacaroonEndpoint},
+		"core.proxy_http":         {valueType: "string"},
+		"core.proxy_https":        {valueType: "string"},
+		"core.proxy_ignore_hosts": {valueType: "string"},
+		"core.trust_password":     {valueType: "string", hiddenValue: true},
+		"core.macaroon.endpoint":  {valueType: "string"},
 
 		"images.auto_update_cached":    {valueType: "bool", defaultValue: "true"},
-		"images.auto_update_interval":  {valueType: "int", defaultValue: "6", trigger: daemonConfigTriggerAutoUpdateInterval},
-		"images.compression_algorithm": {valueType: "string", validator: daemonConfigValidateCompression, defaultValue: "gzip"},
-		"images.remote_cache_expiry":   {valueType: "int", defaultValue: "10", trigger: daemonConfigTriggerExpiry},
+		"images.auto_update_interval":  {valueType: "int", defaultValue: "6"},
+		"images.compression_algorithm": {valueType: "string", defaultValue: "gzip"},
+		"images.remote_cache_expiry":   {valueType: "int", defaultValue: "10"},
 	}
 
 	// Load the values from the DB
@@ -255,49 +203,7 @@ func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 	return config, nil
 }
 
-func daemonConfigSetPassword(d *Daemon, key string, value string) (string, error) {
-	// Nothing to do on unset
-	if value == "" {
-		return value, nil
-	}
-
-	// Hash the password
-	buf := make([]byte, 32)
-	_, err := io.ReadFull(rand.Reader, buf)
-	if err != nil {
-		return "", err
-	}
-
-	hash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64)
-	if err != nil {
-		return "", err
-	}
-
-	buf = append(buf, hash...)
-	value = hex.EncodeToString(buf)
-
-	return value, nil
-}
-
-func daemonConfigSetMacaroonEndpoint(d *Daemon, key string, value string) (string, error) {
-	err := d.setupExternalAuthentication(value)
-	if err != nil {
-		return "", err
-	}
-
-	return value, nil
-}
-
-func daemonConfigSetProxy(d *Daemon, key string, value string) (string, error) {
-	// Get the current config
-	config := map[string]string{}
-	config["core.proxy_https"] = daemonConfig["core.proxy_https"].Get()
-	config["core.proxy_http"] = daemonConfig["core.proxy_http"].Get()
-	config["core.proxy_ignore_hosts"] = daemonConfig["core.proxy_ignore_hosts"].Get()
-
-	// Apply the change
-	config[key] = value
-
+func daemonConfigSetProxy(d *Daemon, config map[string]string) {
 	// Update the cached proxy function
 	d.proxy = shared.ProxyFromConfig(
 		config["core.proxy_https"],
@@ -311,25 +217,4 @@ func daemonConfigSetProxy(d *Daemon, key string, value string) (string, error) {
 		delete(imageStreamCache, k)
 	}
 	imageStreamCacheLock.Unlock()
-
-	return value, nil
-}
-
-func daemonConfigTriggerExpiry(d *Daemon, key string, value string) {
-	// Trigger an image pruning run
-	d.taskPruneImages.Reset()
-}
-
-func daemonConfigTriggerAutoUpdateInterval(d *Daemon, key string, value string) {
-	// Reset the auto-update interval loop
-	d.taskAutoUpdate.Reset()
-}
-
-func daemonConfigValidateCompression(d *Daemon, key string, value string) error {
-	if value == "none" {
-		return nil
-	}
-
-	_, err := exec.LookPath(value)
-	return err
 }
diff --git a/lxd/daemon_test.go b/lxd/daemon_test.go
deleted file mode 100644
index 9ce47e92b..000000000
--- a/lxd/daemon_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package main
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/suite"
-)
-
-type daemonTestSuite struct {
-	lxdTestSuite
-}
-
-func (suite *daemonTestSuite) Test_config_value_set_empty_removes_val() {
-	var err error
-	d := suite.d
-
-	err = daemonConfig["core.trust_password"].Set(d, "foo")
-	suite.Req.Nil(err)
-
-	val := daemonConfig["core.trust_password"].Get()
-	suite.Req.Equal(len(val), 192)
-
-	valMap, err := daemonConfigRender(d.State())
-	suite.Req.NoError(err)
-	value, present := valMap["core.trust_password"]
-	suite.Req.True(present)
-	suite.Req.Equal(value, true)
-
-	err = daemonConfig["core.trust_password"].Set(d, "")
-	suite.Req.Nil(err)
-
-	val = daemonConfig["core.trust_password"].Get()
-	suite.Req.Equal(val, "")
-
-	valMap, err = daemonConfigRender(d.State())
-	suite.Req.NoError(err)
-	_, present = valMap["core.trust_password"]
-	suite.Req.False(present)
-}
-
-func TestDaemonTestSuite(t *testing.T) {
-	suite.Run(t, new(daemonTestSuite))
-}
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 8ea05500f..94dce4347 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -6,8 +6,11 @@ import (
 	"path/filepath"
 	"strconv"
 	"testing"
+	"time"
 
 	"github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
 
@@ -93,8 +96,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_PreseedHTTPSAddressAndTrustPassword()
 	address, err := node.HTTPSAddress(suite.d.db)
 	suite.Req.NoError(err)
 	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
-	secret := daemonConfig["core.trust_password"].Get()
-	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Nil(util.PasswordCheck(config.TrustPassword(), "sekret"))
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // Input network address and trust password interactively.
@@ -116,8 +124,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveHTTPSAddressAndTrustPasswo
 	address, err := node.HTTPSAddress(suite.d.db)
 	suite.Req.NoError(err)
 	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
-	secret := daemonConfig["core.trust_password"].Get()
-	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Nil(util.PasswordCheck(config.TrustPassword(), "sekret"))
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // Enable clustering interactively.
@@ -155,8 +168,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_AutoHTTPSAddressAndTrustPassword() {
 	address, err := node.HTTPSAddress(suite.d.db)
 	suite.Req.NoError(err)
 	suite.Req.Equal(fmt.Sprintf("127.0.0.1:%d", port), address)
-	secret := daemonConfig["core.trust_password"].Get()
-	suite.Req.Nil(util.PasswordCheck(secret, "sekret"))
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Nil(util.PasswordCheck(config.TrustPassword(), "sekret"))
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // The images auto-update interval can be interactively set by simply accepting
@@ -169,15 +187,25 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateAnswerYes() {
 
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["images.auto_update_interval"]
-	suite.Req.Equal("6", key.Get())
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(6*time.Hour, config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // If the images auto-update interval value is already set to non-zero, it
 // won't be overwritten.
 func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateNoOverwrite() {
-	key, _ := daemonConfig["images.auto_update_interval"]
-	err := key.Set(suite.d, "10")
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		_, err = config.Patch(map[string]interface{}{"images.auto_update_interval": "10"})
+		suite.Req.NoError(err)
+		return nil
+	})
 	suite.Req.Nil(err)
 
 	answers := &cmdInitAnswers{
@@ -187,7 +215,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateNoOverwrite() {
 
 	suite.Req.Nil(suite.command.Run())
 
-	suite.Req.Equal("10", key.Get())
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(10*time.Hour, config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // If an invalid backend type is passed with --storage-backend, an
@@ -243,15 +277,26 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateAnswerNo() {
 
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["images.auto_update_interval"]
-	suite.Req.Equal("0", key.Get())
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(time.Duration(0), config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // If the user answers "no" to the images auto-update question, the value will
 // be set to 0, even it was already set to some value.
 func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateOverwriteIfZero() {
-	key, _ := daemonConfig["images.auto_update_interval"]
-	key.Set(suite.d, "10")
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		_, err = config.Patch(map[string]interface{}{"images.auto_update_interval": "10"})
+		suite.Req.NoError(err)
+		return nil
+	})
+	suite.Req.Nil(err)
 
 	answers := &cmdInitAnswers{
 		WantImageAutoUpdate: false,
@@ -259,7 +304,14 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdateOverwriteIfZero() {
 	answers.Render(suite.streams)
 
 	suite.Req.Nil(suite.command.Run())
-	suite.Req.Equal("0", key.Get())
+
+	err = suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(time.Duration(0), config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // Preseed the image auto-update interval.
@@ -270,8 +322,13 @@ func (suite *cmdInitTestSuite) TestCmdInit_ImagesAutoUpdatePreseed() {
 `)
 	suite.Req.Nil(suite.command.Run())
 
-	key, _ := daemonConfig["images.auto_update_interval"]
-	suite.Req.Equal("15", key.Get())
+	err := suite.d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		suite.Req.NoError(err)
+		suite.Req.Equal(15*time.Hour, config.AutoUpdateInterval())
+		return nil
+	})
+	suite.Req.NoError(err)
 }
 
 // If --storage-backend is set to "dir" a storage pool is created.
diff --git a/lxd/node/config.go b/lxd/node/config.go
index 26f06004d..68cb5dd9c 100644
--- a/lxd/node/config.go
+++ b/lxd/node/config.go
@@ -75,7 +75,7 @@ func HTTPSAddress(node *db.Node) (string, error) {
 func (c *Config) update(values map[string]interface{}) error {
 	changed, err := c.m.Change(values)
 	if err != nil {
-		return fmt.Errorf("invalid configuration changes: %s", err)
+		return err
 	}
 
 	err = c.tx.UpdateConfig(changed)
diff --git a/lxd/patches.go b/lxd/patches.go
index f471cd70e..e389567c9 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -284,7 +284,7 @@ func patchStorageApi(name string, d *Daemon) error {
 		if err != nil {
 			return err
 		}
-		return config.Patch(map[string]interface{}{
+		_, err = config.Patch(map[string]interface{}{
 			"storage.lvm_fstype":           "",
 			"storage.lvm_mount_options":    "",
 			"storage.lvm_thinpool_name":    "",
@@ -294,6 +294,7 @@ func patchStorageApi(name string, d *Daemon) error {
 			"storage.zfs_remove_snapshots": "",
 			"storage.zfs_use_refquota":     "",
 		})
+		return err
 	})
 	if err != nil {
 		return err

From 758a865b208ee008b61ea1e455f04b3e11a40e67 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 19 Oct 2017 15:10:56 +0000
Subject: [PATCH 043/116] Drop the daemonConfig cache and access the db
 directly instead

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go         |  16 +++--
 lxd/api_cluster.go     |   5 +-
 lxd/certificates.go    |   8 ++-
 lxd/cluster/config.go  |  71 +++++++++++++++++++++
 lxd/containers_post.go |   9 ++-
 lxd/daemon.go          |  37 ++++++-----
 lxd/daemon_config.go   | 166 +------------------------------------------------
 lxd/daemon_images.go   |   6 +-
 lxd/images.go          |  45 +++++++++++---
 lxd/main_init_test.go  |   5 +-
 10 files changed, 167 insertions(+), 201 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index b63524e5c..4486ce5fa 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -63,8 +63,18 @@ var api10 = []Command{
 
 func api10Get(d *Daemon, r *http.Request) Response {
 	authMethods := []string{"tls"}
-	if daemonConfig["core.macaroon.endpoint"].Get() != "" {
-		authMethods = append(authMethods, "macaroons")
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		if config.MacaroonEndpoint() != "" {
+			authMethods = append(authMethods, "macaroons")
+		}
+		return nil
+	})
+	if err != nil {
+		return SmartError(err)
 	}
 	srv := api.ServerUntrusted{
 		APIExtensions: version.APIExtensions,
@@ -256,8 +266,6 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 		}
 	}
 
-	daemonConfigInit(d.cluster)
-
 	for key, value := range changed {
 		switch key {
 		case "core.proxy_http":
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 00ff4a0ff..c00d763c5 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -71,7 +71,10 @@ func clusterPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 func clusterPostAccept(d *Daemon, req api.ClusterPost) Response {
 	// Accepting a node requires the client to provide the correct
 	// trust password.
-	secret := daemonConfig["core.trust_password"].Get()
+	secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
+	if err != nil {
+		return SmartError(err)
+	}
 	if util.PasswordCheck(secret, req.TargetPassword) != nil {
 		return Forbidden
 	}
diff --git a/lxd/certificates.go b/lxd/certificates.go
index 97dba38a7..fd3c2ea6c 100644
--- a/lxd/certificates.go
+++ b/lxd/certificates.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -98,7 +99,10 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Access check
-	secret := daemonConfig["core.trust_password"].Get()
+	secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
+	if err != nil {
+		return SmartError(err)
+	}
 	if d.checkTrustedClient(r) != nil && util.PasswordCheck(secret, req.Password) != nil {
 		return Forbidden
 	}
@@ -144,7 +148,7 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	err := saveCert(d.db, name, cert)
+	err = saveCert(d.db, name, cert)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index 7cb8634f2..a7dbfcb67 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -62,17 +62,39 @@ func (c *Config) TrustPassword() string {
 	return c.m.GetString("core.trust_password")
 }
 
+// MacaroonEndpoint returns the address of the macaroon endpoint to use for
+// authentication, if any.
+func (c *Config) MacaroonEndpoint() string {
+	return c.m.GetString("core.macaroon.endpoint")
+}
+
 // AutoUpdateInterval returns the configured images auto update interval.
 func (c *Config) AutoUpdateInterval() time.Duration {
 	n := c.m.GetInt64("images.auto_update_interval")
 	return time.Duration(n) * time.Hour
 }
 
+// RemoteCacheExpiry returns the configured expiration value for remote images
+// expiration.
+func (c *Config) RemoteCacheExpiry() int64 {
+	return c.m.GetInt64("images.remote_cache_expiry")
+}
+
+// ProxyHTTPS returns the configured HTTPS proxy, if any.
+func (c *Config) ProxyHTTPS() string {
+	return c.m.GetString("core.proxy_https")
+}
+
 // ProxyHTTP returns the configured HTTP proxy, if any.
 func (c *Config) ProxyHTTP() string {
 	return c.m.GetString("core.proxy_http")
 }
 
+// ProxyIgnoreHosts returns the configured ignore-hosts proxy setting, if any.
+func (c *Config) ProxyIgnoreHosts() string {
+	return c.m.GetString("core.proxy_ignore_hosts")
+}
+
 // Dump current configuration keys and their values. Keys with values matching
 // their defaults are omitted.
 func (c *Config) Dump() map[string]interface{} {
@@ -111,6 +133,55 @@ func (c *Config) update(values map[string]interface{}) (map[string]string, error
 	return changed, nil
 }
 
+// ConfigGetString is a convenience for loading the cluster configuration and
+// returning the value of a particular key.
+//
+// It's a deprecated API meant to be used by call sites that are not
+// interacting with the database in a transactional way.
+func ConfigGetString(cluster *db.Cluster, key string) (string, error) {
+	config, err := configGet(cluster)
+	if err != nil {
+		return "", err
+	}
+	return config.m.GetString(key), nil
+}
+
+// ConfigGetBool is a convenience for loading the cluster configuration and
+// returning the value of a particular boolean key.
+//
+// It's a deprecated API meant to be used by call sites that are not
+// interacting with the database in a transactional way.
+func ConfigGetBool(cluster *db.Cluster, key string) (bool, error) {
+	config, err := configGet(cluster)
+	if err != nil {
+		return false, err
+	}
+	return config.m.GetBool(key), nil
+}
+
+// ConfigGetInt64 is a convenience for loading the cluster configuration and
+// returning the value of a particular key.
+//
+// It's a deprecated API meant to be used by call sites that are not
+// interacting with the database in a transactional way.
+func ConfigGetInt64(cluster *db.Cluster, key string) (int64, error) {
+	config, err := configGet(cluster)
+	if err != nil {
+		return 0, err
+	}
+	return config.m.GetInt64(key), nil
+}
+
+func configGet(cluster *db.Cluster) (*Config, error) {
+	var config *Config
+	err := cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		config, err = ConfigLoad(tx)
+		return err
+	})
+	return config, err
+}
+
 // ConfigSchema defines available server configuration keys.
 var ConfigSchema = config.Schema{
 	"core.https_allowed_headers":     {},
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 76d2a615a..5ac3e0b34 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -11,6 +11,7 @@ import (
 	"github.com/dustinkirkland/golang-petname"
 	"github.com/gorilla/websocket"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/types"
 	"github.com/lxc/lxd/shared"
@@ -96,9 +97,13 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 
 		var info *api.Image
 		if req.Source.Server != "" {
+			autoUpdate, err := cluster.ConfigGetBool(d.cluster, "images.auto_update_cached")
+			if err != nil {
+				return err
+			}
 			info, err = d.ImageDownload(
-				op, req.Source.Server, req.Source.Protocol, req.Source.Certificate, req.Source.Secret,
-				hash, true, daemonConfig["images.auto_update_cached"].GetBool(), "", true)
+				op, req.Source.Server, req.Source.Protocol, req.Source.Certificate,
+				req.Source.Secret, hash, true, autoUpdate, "", true)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 030440b92..f1b8e9bf9 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -431,12 +431,6 @@ func (d *Daemon) init() error {
 		}
 	}
 
-	/* Load all config values from the database */
-	err = daemonConfigInit(d.cluster)
-	if err != nil {
-		return err
-	}
-
 	/* Read the storage pools */
 	err = SetupStorageDriver(d.State(), false)
 	if err != nil {
@@ -464,12 +458,26 @@ func (d *Daemon) init() error {
 	/* Log expiry */
 	d.tasks.Add(expireLogsTask(d.State()))
 
-	/* set the initial proxy function based on config values in the DB */
-	d.proxy = shared.ProxyFromConfig(
-		daemonConfig["core.proxy_https"].Get(),
-		daemonConfig["core.proxy_http"].Get(),
-		daemonConfig["core.proxy_ignore_hosts"].Get(),
-	)
+	/* set the initial proxy function and external auth based on config values in the DB */
+	macaroonEndpoint := ""
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		config, err := cluster.ConfigLoad(tx)
+		if err != nil {
+			return err
+		}
+		d.proxy = shared.ProxyFromConfig(
+			config.ProxyHTTPS(), config.ProxyHTTP(), config.ProxyIgnoreHosts(),
+		)
+		macaroonEndpoint = config.MacaroonEndpoint()
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	err = d.setupExternalAuthentication(macaroonEndpoint)
+	if err != nil {
+		return err
+	}
 
 	if !d.os.MockMode {
 		/* Start the scheduler */
@@ -477,11 +485,6 @@ func (d *Daemon) init() error {
 		readSavedClientCAList(d)
 	}
 
-	err = d.setupExternalAuthentication(daemonConfig["core.macaroon.endpoint"].Get())
-	if err != nil {
-		return err
-	}
-
 	close(d.setupChan)
 
 	// Run the post initialization actions
diff --git a/lxd/daemon_config.go b/lxd/daemon_config.go
index 3417d8edd..3f0371c87 100644
--- a/lxd/daemon_config.go
+++ b/lxd/daemon_config.go
@@ -1,178 +1,18 @@
 package main
 
 import (
-	"fmt"
-	"strconv"
-	"strings"
-	"sync"
-
-	log "github.com/lxc/lxd/shared/log15"
-
 	"github.com/lxc/lxd/lxd/cluster"
-	dbapi "github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
-	"github.com/lxc/lxd/shared/logger"
 )
 
-var daemonConfigLock sync.Mutex
-var daemonConfig map[string]*daemonConfigKey
-
-type daemonConfigKey struct {
-	valueType    string
-	defaultValue string
-	validValues  []string
-	currentValue string
-	hiddenValue  bool
-
-	validator func(d *Daemon, key string, value string) error
-	setter    func(d *Daemon, key string, value string) (string, error)
-	trigger   func(d *Daemon, key string, value string)
-}
-
-func (k *daemonConfigKey) name() string {
-	name := ""
-
-	// Look for a matching entry in daemonConfig
-	daemonConfigLock.Lock()
-	for key, value := range daemonConfig {
-		if value == k {
-			name = key
-			break
-		}
-	}
-	daemonConfigLock.Unlock()
-
-	return name
-}
-
-func (k *daemonConfigKey) Validate(d *Daemon, value string) error {
-	// Handle unsetting
-	if value == "" {
-		value = k.defaultValue
-
-		if k.validator != nil {
-			err := k.validator(d, k.name(), value)
-			if err != nil {
-				return err
-			}
-		}
-
-		return nil
-	}
-
-	// Validate booleans
-	if k.valueType == "bool" && !shared.StringInSlice(strings.ToLower(value), []string{"true", "false", "1", "0", "yes", "no", "on", "off"}) {
-		return fmt.Errorf("Invalid value for a boolean: %s", value)
-	}
-
-	// Validate integers
-	if k.valueType == "int" {
-		_, err := strconv.ParseInt(value, 10, 64)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Check against valid values
-	if k.validValues != nil && !shared.StringInSlice(value, k.validValues) {
-		return fmt.Errorf("Invalid value, only the following values are allowed: %s", k.validValues)
-	}
-
-	// Run external validation function
-	if k.validator != nil {
-		err := k.validator(d, k.name(), value)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (k *daemonConfigKey) Get() string {
-	value := k.currentValue
-
-	// Get the default value if not set
-	if value == "" {
-		value = k.defaultValue
-	}
-
-	return value
-}
-
-func (k *daemonConfigKey) GetBool() bool {
-	value := k.currentValue
-
-	// Get the default value if not set
-	if value == "" {
-		value = k.defaultValue
-	}
-
-	// Convert to boolean
-	return shared.IsTrue(value)
-}
-
-func (k *daemonConfigKey) GetInt64() int64 {
-	value := k.currentValue
-
-	// Get the default value if not set
-	if value == "" {
-		value = k.defaultValue
-	}
-
-	// Convert to int64
-	ret, _ := strconv.ParseInt(value, 10, 64)
-	return ret
-}
-
-func daemonConfigInit(cluster *dbapi.Cluster) error {
-	// Set all the keys
-	daemonConfig = map[string]*daemonConfigKey{
-		"core.proxy_http":         {valueType: "string"},
-		"core.proxy_https":        {valueType: "string"},
-		"core.proxy_ignore_hosts": {valueType: "string"},
-		"core.trust_password":     {valueType: "string", hiddenValue: true},
-		"core.macaroon.endpoint":  {valueType: "string"},
-
-		"images.auto_update_cached":    {valueType: "bool", defaultValue: "true"},
-		"images.auto_update_interval":  {valueType: "int", defaultValue: "6"},
-		"images.compression_algorithm": {valueType: "string", defaultValue: "gzip"},
-		"images.remote_cache_expiry":   {valueType: "int", defaultValue: "10"},
-	}
-
-	// Load the values from the DB
-	var dbValues map[string]string
-	err := cluster.Transaction(func(tx *dbapi.ClusterTx) error {
-		var err error
-		dbValues, err = tx.Config()
-		return err
-	})
-	if err != nil {
-		return err
-	}
-
-	daemonConfigLock.Lock()
-	for k, v := range dbValues {
-		_, ok := daemonConfig[k]
-		if !ok {
-			logger.Error("Found unknown configuration key in database", log.Ctx{"key": k})
-			continue
-		}
-
-		daemonConfig[k].currentValue = v
-	}
-	daemonConfigLock.Unlock()
-
-	return nil
-}
-
 func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 	config := map[string]interface{}{}
 
 	// Turn the config into a JSON-compatible map
-	err := state.Cluster.Transaction(func(tx *dbapi.ClusterTx) error {
+	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		clusterConfig, err := cluster.ConfigLoad(tx)
 		if err != nil {
 			return err
@@ -186,7 +26,7 @@ func daemonConfigRender(state *state.State) (map[string]interface{}, error) {
 		return nil, err
 	}
 
-	err = state.Node.Transaction(func(tx *dbapi.NodeTx) error {
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
 		nodeConfig, err := node.ConfigLoad(tx)
 		if err != nil {
 			return err
diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index abc81a965..d6f3f9e9c 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -15,6 +15,7 @@ import (
 	"gopkg.in/yaml.v2"
 
 	"github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/sys"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -231,7 +232,10 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 	// server/protocol/alias, regardless of whether it's stale or
 	// not (we can assume that it will be not *too* stale since
 	// auto-update is on).
-	interval := daemonConfig["images.auto_update_interval"].GetInt64()
+	interval, err := cluster.ConfigGetInt64(d.cluster, "images.auto_update_interval")
+	if err != nil {
+		return nil, err
+	}
 	if preferCached && interval > 0 && alias != fp {
 		cachedFingerprint, err := d.db.ImageSourceGetCachedFingerprint(server, protocol, alias)
 		if err == nil && cachedFingerprint != fp {
diff --git a/lxd/images.go b/lxd/images.go
index 68fd51f6d..8e6d1078d 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -21,9 +21,11 @@ import (
 	"time"
 
 	"github.com/gorilla/mux"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"gopkg.in/yaml.v2"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/task"
@@ -268,7 +270,10 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, builddir st
 	if req.CompressionAlgorithm != "" {
 		compress = req.CompressionAlgorithm
 	} else {
-		compress = daemonConfig["images.compression_algorithm"].Get()
+		compress, err = cluster.ConfigGetString(d.cluster, "images.compression_algorithm")
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	if compress != "none" {
@@ -859,8 +864,19 @@ func autoUpdateImagesTask(d *Daemon) (task.Func, task.Schedule) {
 		autoUpdateImages(ctx, d)
 	}
 	schedule := func() (time.Duration, error) {
-		interval := daemonConfig["images.auto_update_interval"].GetInt64()
-		return time.Duration(interval) * time.Hour, nil
+		var interval time.Duration
+		err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			config, err := cluster.ConfigLoad(tx)
+			if err != nil {
+				return errors.Wrap(err, "failed to load cluster configuration")
+			}
+			interval = config.AutoUpdateInterval()
+			return nil
+		})
+		if err != nil {
+			return 0, err
+		}
+		return interval, nil
 	}
 	return f, schedule
 }
@@ -1037,8 +1053,10 @@ func pruneExpiredImagesTask(d *Daemon) (task.Func, task.Schedule) {
 
 	// Skip the first run, and instead run an initial pruning synchronously
 	// before we start updating images later on in the start up process.
-	expiry := daemonConfig["images.remote_cache_expiry"].GetInt64()
-	if expiry > 0 {
+	expiry, err := cluster.ConfigGetInt64(d.cluster, "images.remote_cache_expiry")
+	if err != nil {
+		logger.Error("Unable to fetch cluster configuration", log.Ctx{"err": err})
+	} else if expiry > 0 {
 		pruneExpiredImages(context.Background(), d)
 	}
 	first := true
@@ -1049,7 +1067,11 @@ func pruneExpiredImagesTask(d *Daemon) (task.Func, task.Schedule) {
 			return interval, task.ErrSkip
 		}
 
-		expiry := daemonConfig["images.remote_cache_expiry"].GetInt64()
+		expiry, err := cluster.ConfigGetInt64(d.cluster, "images.remote_cache_expiry")
+		if err != nil {
+			logger.Error("Unable to fetch cluster configuration", log.Ctx{"err": err})
+			return interval, nil
+		}
 
 		// Check if we're supposed to prune at all
 		if expiry <= 0 {
@@ -1063,10 +1085,15 @@ func pruneExpiredImagesTask(d *Daemon) (task.Func, task.Schedule) {
 }
 
 func pruneExpiredImages(ctx context.Context, d *Daemon) {
-	// Get the list of expired images.
-	expiry := daemonConfig["images.remote_cache_expiry"].GetInt64()
-
 	logger.Infof("Pruning expired images")
+
+	expiry, err := cluster.ConfigGetInt64(d.cluster, "images.remote_cache_expiry")
+	if err != nil {
+		logger.Error("Unable to fetch cluster configuration", log.Ctx{"err": err})
+		return
+	}
+
+	// Get the list of expired images.
 	images, err := d.db.ImagesGetExpired(expiry)
 	if err != nil {
 		logger.Error("Unable to retrieve the list of expired images", log.Ctx{"err": err})
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 94dce4347..66e37810d 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -396,8 +396,9 @@ storage_pools:
 	_, _, err = suite.client.GetStoragePool("second")
 	suite.Req.Equal("not found", err.Error())
 
-	key, _ := daemonConfig["images.auto_update_interval"]
-	suite.Req.NotEqual("15", key.Get())
+	interval, err := cluster.ConfigGetInt64(suite.d.cluster, "images.auto_update_interval")
+	suite.Req.NoError(err)
+	suite.Req.NotEqual(int64(15), interval)
 }
 
 // Updating a storage pool via preseed will fail, since it's not supported

From ef449efe9b7e600767cb4798b6c01e14b18e4947 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 13:13:28 +0000
Subject: [PATCH 044/116] Add more clustering-related integration tests

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/.dir-locals.el             |   2 +-
 lxd/api_cluster_test.go        | 186 +++++++++++++++++++++++++++++++++++++++--
 lxd/daemon.go                  |  19 +++--
 lxd/daemon_integration_test.go |  20 +++++
 shared/logging/testing.go      |   1 +
 5 files changed, 215 insertions(+), 13 deletions(-)

diff --git a/lxd/.dir-locals.el b/lxd/.dir-locals.el
index 315bd893b..bf09f9074 100644
--- a/lxd/.dir-locals.el
+++ b/lxd/.dir-locals.el
@@ -1,7 +1,7 @@
 ;;; Directory Local Variables
 ;;; For more information see (info "(emacs) Directory Variables")
 ((go-mode
-  . ((go-test-args . "-tags libsqlite3 -timeout 10s")
+  . ((go-test-args . "-tags libsqlite3 -timeout 25s")
      (eval
       . (set
 	 (make-local-variable 'flycheck-go-build-tags)
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index aa096b9d6..858f2433a 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -5,31 +5,203 @@ import (
 	"testing"
 
 	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-// A LXD node which is already configured for networking can be coverted to a
+// A LXD node which is already configured for networking can be converted to a
 // single-node LXD cluster.
 func TestCluster_Bootstrap(t *testing.T) {
 	daemon, cleanup := newDaemon(t)
 	defer cleanup()
 
-	client, err := lxd.ConnectLXDUnix(daemon.UnixSocket(), nil)
+	f := clusterFixture{t: t}
+	f.EnableNetworking(daemon, "")
+
+	client := f.ClientUnix(daemon)
+
+	op, err := client.BootstrapCluster("buzz")
 	require.NoError(t, err)
+	require.NoError(t, op.Wait())
+}
 
-	server, _, err := client.GetServer()
+// A LXD node which is already configured for networking can join an existing
+// cluster.
+func TestCluster_Join(t *testing.T) {
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	passwords := []string{"sekret", ""}
+
+	for i, daemon := range daemons {
+		f.EnableNetworking(daemon, passwords[i])
+	}
+
+	// Bootstrap the cluster using the first node.
+	client := f.ClientUnix(daemons[0])
+	op, err := client.BootstrapCluster("buzz")
+	require.NoError(t, err)
+	require.NoError(t, op.Wait())
+
+	// Make the second node join the cluster.
+	address := daemons[0].endpoints.NetworkAddress()
+	cert := daemons[0].endpoints.NetworkPublicKey()
+	client = f.ClientUnix(daemons[1])
+	op, err = client.JoinCluster(address, "sekret", cert, "rusp")
 	require.NoError(t, err)
+	require.NoError(t, op.Wait())
 
-	port, err := shared.AllocatePort()
+	// Both nodes are listed as database nodes in the second node's sqlite
+	// database.
+	state := daemons[1].State()
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		require.NoError(t, err)
+		require.Len(t, nodes, 2)
+		assert.Equal(t, int64(1), nodes[0].ID)
+		assert.Equal(t, int64(2), nodes[1].ID)
+		assert.Equal(t, daemons[0].endpoints.NetworkAddress(), nodes[0].Address)
+		assert.Equal(t, daemons[1].endpoints.NetworkAddress(), nodes[1].Address)
+		return nil
+	})
 	require.NoError(t, err)
+}
 
-	serverPut := server.Writable()
-	serverPut.Config["core.https_address"] = fmt.Sprintf("localhost:%d", port)
+// If the wrong trust password is given, the join request fails.
+func TestCluster_JoinWrongTrustPassword(t *testing.T) {
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	passwords := []string{"sekret", ""}
 
-	require.NoError(t, client.UpdateServer(serverPut, ""))
+	for i, daemon := range daemons {
+		f.EnableNetworking(daemon, passwords[i])
+	}
 
+	// Bootstrap the cluster using the first node.
+	client := f.ClientUnix(daemons[0])
 	op, err := client.BootstrapCluster("buzz")
 	require.NoError(t, err)
 	require.NoError(t, op.Wait())
+
+	// Make the second node join the cluster.
+	address := daemons[0].endpoints.NetworkAddress()
+	cert := daemons[0].endpoints.NetworkPublicKey()
+	client = f.ClientUnix(daemons[1])
+	op, err = client.JoinCluster(address, "noop", cert, "rusp")
+	require.NoError(t, err)
+	assert.EqualError(t, op.Wait(), "failed to request to add node: not authorized")
+}
+
+// In a cluster of 3 nodes, if the leader goes down, another one is elected and
+// the other two nodes continue to operate fine.
+func TestCluster_Failover(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping cluster failover test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 3)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	// FIXME: here we manually update the raft_nodes table, this can be
+	//        removed when automatic database nodes updating is in place.
+	var nodes []db.RaftNode
+	state := daemons[0].State()
+	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+		var err error
+		nodes, err = tx.RaftNodes()
+		return err
+	})
+	require.NoError(t, err)
+	for _, daemon := range daemons[1:] {
+		state := daemon.State()
+		err := state.Node.Transaction(func(tx *db.NodeTx) error {
+			return tx.RaftNodesReplace(nodes)
+		})
+		require.NoError(t, err)
+	}
+
+	require.NoError(t, daemons[0].Stop())
+
+	for i, daemon := range daemons[1:] {
+		client := f.ClientUnix(daemon)
+		server, _, err := client.GetServer()
+		require.NoError(f.t, err)
+		serverPut := server.Writable()
+		serverPut.Config["core.trust_password"] = fmt.Sprintf("sekret-%d", i)
+
+		require.NoError(f.t, client.UpdateServer(serverPut, ""))
+	}
+}
+
+// Test helper for cluster-related APIs.
+type clusterFixture struct {
+	t       *testing.T
+	clients map[*Daemon]lxd.ContainerServer
+}
+
+// Form a cluster using the given daemons. The first daemon will be the leader.
+func (f *clusterFixture) FormCluster(daemons []*Daemon) {
+	for i, daemon := range daemons {
+		password := ""
+		if i == 0 {
+			password = "sekret"
+		}
+		f.EnableNetworking(daemon, password)
+	}
+
+	// Bootstrap the cluster using the first node.
+	client := f.ClientUnix(daemons[0])
+	op, err := client.BootstrapCluster("buzz")
+	require.NoError(f.t, err)
+	require.NoError(f.t, op.Wait())
+
+	// Make the other nodes join the cluster.
+	address := daemons[0].endpoints.NetworkAddress()
+	cert := daemons[0].endpoints.NetworkPublicKey()
+	for i, daemon := range daemons[1:] {
+		client = f.ClientUnix(daemon)
+		op, err := client.JoinCluster(address, "sekret", cert, fmt.Sprintf("rusp-%d", i))
+		require.NoError(f.t, err)
+		require.NoError(f.t, op.Wait())
+	}
+}
+
+// Enable networking in the given daemon. The password is optional and can be
+// an empty string.
+func (f *clusterFixture) EnableNetworking(daemon *Daemon, password string) {
+	port, err := shared.AllocatePort()
+	require.NoError(f.t, err)
+
+	address := fmt.Sprintf("127.0.0.1:%d", port)
+
+	client := f.ClientUnix(daemon)
+	server, _, err := client.GetServer()
+	require.NoError(f.t, err)
+	serverPut := server.Writable()
+	serverPut.Config["core.https_address"] = address
+	serverPut.Config["core.trust_password"] = password
+
+	require.NoError(f.t, client.UpdateServer(serverPut, ""))
+}
+
+// Get a client for the given daemon connected via UNIX socket, creating one if
+// needed.
+func (f *clusterFixture) ClientUnix(daemon *Daemon) lxd.ContainerServer {
+	if f.clients == nil {
+		f.clients = make(map[*Daemon]lxd.ContainerServer)
+	}
+	client, ok := f.clients[daemon]
+	if !ok {
+		var err error
+		client, err = lxd.ConnectLXDUnix(daemon.UnixSocket(), nil)
+		require.NoError(f.t, err)
+	}
+	return client
 }
diff --git a/lxd/daemon.go b/lxd/daemon.go
index f1b8e9bf9..b5d580f93 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/x509"
 	"database/sql"
+	"database/sql/driver"
 	"fmt"
 	"io"
 	"net/http"
@@ -552,10 +553,10 @@ func (d *Daemon) numRunningContainers() (int, error) {
 
 // Stop stops the shared daemon.
 func (d *Daemon) Stop() error {
-	errors := []error{}
+	errs := []error{}
 	trackError := func(err error) {
 		if err != nil {
-			errors = append(errors, err)
+			errs = append(errs, err)
 		}
 	}
 
@@ -575,7 +576,15 @@ func (d *Daemon) Stop() error {
 		trackError(d.db.Close())
 	}
 	if d.cluster != nil {
-		trackError(d.cluster.Close())
+		err := d.cluster.Close()
+		// If we got io.EOF the network connection was interrupted and
+		// it's likely that the other node shutdown. Let's just log a
+		// warning.
+		if errors.Cause(err) == driver.ErrBadConn {
+			logger.Warnf("Could not close remote database: %v", err)
+		} else {
+			trackError(err)
+		}
 	}
 	if d.gateway != nil {
 		trackError(d.gateway.Shutdown())
@@ -605,12 +614,12 @@ func (d *Daemon) Stop() error {
 	logger.Infof("Saved simplestreams cache")
 
 	var err error
-	if n := len(errors); n > 0 {
+	if n := len(errs); n > 0 {
 		format := "%v"
 		if n > 1 {
 			format += fmt.Sprintf(" (and %d more errors)", n)
 		}
-		err = fmt.Errorf(format, errors[0])
+		err = fmt.Errorf(format, errs[0])
 	}
 	return err
 }
diff --git a/lxd/daemon_integration_test.go b/lxd/daemon_integration_test.go
index f18c0e78c..2012dc657 100644
--- a/lxd/daemon_integration_test.go
+++ b/lxd/daemon_integration_test.go
@@ -45,6 +45,26 @@ func newDaemon(t *testing.T) (*Daemon, func()) {
 	return daemon, cleanup
 }
 
+// Create the given number of test Daemon instances.
+//
+// Return a function that can be used to cleanup every associated state.
+func newDaemons(t *testing.T, n int) ([]*Daemon, func()) {
+	daemons := make([]*Daemon, n)
+	cleanups := make([]func(), n)
+
+	for i := 0; i < n; i++ {
+		daemons[i], cleanups[i] = newDaemon(t)
+	}
+
+	cleanup := func() {
+		for _, cleanup := range cleanups {
+			cleanup()
+		}
+	}
+
+	return daemons, cleanup
+}
+
 // Create a new DaemonConfig object for testing purposes.
 func newConfig() *DaemonConfig {
 	return &DaemonConfig{
diff --git a/shared/logging/testing.go b/shared/logging/testing.go
index d92241d8f..22c3a9a90 100644
--- a/shared/logging/testing.go
+++ b/shared/logging/testing.go
@@ -34,5 +34,6 @@ func (h *testingHandler) Log(r *log.Record) error {
 	}
 
 	h.t.Logf("%s %s %s%s", r.Time.Format("15:04:05.000"), r.Lvl, r.Msg, ctx)
+	//fmt.Printf("%s %s %s%s\n", r.Time.Format("15:04:05.000"), r.Lvl, r.Msg, ctx)
 	return nil
 }

From 0fbfc903c75560c923407c943e82e6cb3da9191c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 14:08:52 +0000
Subject: [PATCH 045/116] Check cluster TLS certificate in gRPC endpoint

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go         |  5 +++++
 lxd/cluster/gateway_test.go    | 39 ++++++++++++++++++++++++++++++++++++++-
 lxd/cluster/raft.go            | 13 ++-----------
 lxd/cluster/tls.go             | 16 ++++++++++++++++
 lxd/cluster/tls_export_test.go |  4 ++++
 5 files changed, 65 insertions(+), 12 deletions(-)
 create mode 100644 lxd/cluster/tls_export_test.go

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 1c8d65924..af0a9de80 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -86,6 +86,11 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 			return
 		}
 
+		if !tlsCheckCert(r, g.cert) {
+			http.Error(w, "403 invalid client certificate", http.StatusForbidden)
+			return
+		}
+
 		// Before actually establishing the gRPC SQL connection, our
 		// dialer probes the node to see if it's currently the leader
 		// (otherwise it tries with another node or retry later).
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index cb5c500e2..d2e5afe69 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -1,13 +1,14 @@
 package cluster_test
 
 import (
+	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"os"
 	"path/filepath"
 	"testing"
 
-	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
@@ -68,6 +69,42 @@ func TestGateway_SingleWithNetworkAddress(t *testing.T) {
 	require.NoError(t, conn.Close())
 }
 
+// When networked, the grpc and raft endpoints requires the cluster
+// certificate.
+func TestGateway_NetworkAuth(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	address := server.Listener.Addr().String()
+	setRaftRole(t, db, address)
+
+	gateway := newGateway(t, db, cert)
+	defer gateway.Shutdown()
+
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	// Make a request using a certificate different than the cluster one.
+	config, err := cluster.TLSClientConfig(shared.TestingAltKeyPair())
+	config.InsecureSkipVerify = true // Skip client-side verification
+	require.NoError(t, err)
+	client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
+
+	for path := range gateway.HandlerFuncs() {
+		url := fmt.Sprintf("https://%s%s", address, path)
+		response, err := client.Head(url)
+		require.NoError(t, err)
+		assert.Equal(t, http.StatusForbidden, response.StatusCode)
+	}
+
+}
+
 // Create a new test Gateway with the given parameters, and ensure no error
 // happens.
 func newGateway(t *testing.T, db *db.Node, certInfo *shared.CertInfo) *cluster.Gateway {
diff --git a/lxd/cluster/raft.go b/lxd/cluster/raft.go
index 7db15baf9..20c3ac9f9 100644
--- a/lxd/cluster/raft.go
+++ b/lxd/cluster/raft.go
@@ -2,7 +2,6 @@ package cluster
 
 import (
 	"bytes"
-	"crypto/x509"
 	"fmt"
 	"log"
 	"math"
@@ -22,7 +21,6 @@ import (
 	"github.com/hashicorp/raft-boltdb"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
-	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/log15"
 	"github.com/lxc/lxd/shared/logger"
@@ -360,17 +358,10 @@ func raftHandler(info *shared.CertInfo, handler *rafthttp.Handler) http.HandlerF
 	if handler == nil {
 		return nil
 	}
-	cert, err := x509.ParseCertificate(info.KeyPair().Certificate[0])
-	if err != nil {
-		// Since we have already loaded this certificate, typically
-		// using LoadX509KeyPair, an error should never happen, but
-		// check for good measure.
-		panic(fmt.Sprintf("invalid keypair material: %v", err))
-	}
-	trustedCerts := []x509.Certificate{*cert}
 	return func(w http.ResponseWriter, r *http.Request) {
-		if r.TLS == nil || !util.CheckTrustState(*r.TLS.PeerCertificates[0], trustedCerts) {
+		if !tlsCheckCert(r, info) {
 			http.Error(w, "403 invalid client certificate", http.StatusForbidden)
+			return
 		}
 		handler.ServeHTTP(w, r)
 	}
diff --git a/lxd/cluster/tls.go b/lxd/cluster/tls.go
index aa9b75731..7ed754ec4 100644
--- a/lxd/cluster/tls.go
+++ b/lxd/cluster/tls.go
@@ -3,7 +3,10 @@ package cluster
 import (
 	"crypto/tls"
 	"crypto/x509"
+	"fmt"
+	"net/http"
 
+	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 )
 
@@ -33,3 +36,16 @@ func tlsClientConfig(info *shared.CertInfo) (*tls.Config, error) {
 	}
 	return config, nil
 }
+
+// Return true if the given request is presenting the given cluster certificate.
+func tlsCheckCert(r *http.Request, info *shared.CertInfo) bool {
+	cert, err := x509.ParseCertificate(info.KeyPair().Certificate[0])
+	if err != nil {
+		// Since we have already loaded this certificate, typically
+		// using LoadX509KeyPair, an error should never happen, but
+		// check for good measure.
+		panic(fmt.Sprintf("invalid keypair material: %v", err))
+	}
+	trustedCerts := []x509.Certificate{*cert}
+	return r.TLS != nil && util.CheckTrustState(*r.TLS.PeerCertificates[0], trustedCerts)
+}
diff --git a/lxd/cluster/tls_export_test.go b/lxd/cluster/tls_export_test.go
new file mode 100644
index 000000000..d8248b70a
--- /dev/null
+++ b/lxd/cluster/tls_export_test.go
@@ -0,0 +1,4 @@
+package cluster
+
+// TLSClientConfig is used to generate TLS client configurations in unit tests.
+var TLSClientConfig = tlsClientConfig

From 309754a6c10d3bdf2f595928b1f4fc701c98f237 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 20:48:07 +0000
Subject: [PATCH 046/116] Add heartbeat logic

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go             |  57 ++++++++++-
 lxd/cluster/gateway_export_test.go |  22 +++++
 lxd/cluster/gateway_test.go        |  22 +++++
 lxd/cluster/heartbeat.go           | 113 +++++++++++++++++++++
 lxd/cluster/heartbeat_test.go      | 197 +++++++++++++++++++++++++++++++++++++
 lxd/cluster/membership_test.go     |   9 ++
 lxd/cluster/raft.go                |  16 +++
 7 files changed, 433 insertions(+), 3 deletions(-)
 create mode 100644 lxd/cluster/gateway_export_test.go
 create mode 100644 lxd/cluster/heartbeat.go
 create mode 100644 lxd/cluster/heartbeat_test.go

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index af0a9de80..826b11aec 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"net"
 	"net/http"
+	"strconv"
 	"time"
 
 	"github.com/CanonicalLtd/dqlite"
@@ -102,6 +103,24 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 			return
 		}
 
+		// Handle heartbeats.
+		if r.Method == "PUT" {
+			var nodes []db.RaftNode
+			err := shared.ReadToJSON(r.Body, &nodes)
+			if err != nil {
+				http.Error(w, "400 invalid raft nodes payload", http.StatusBadRequest)
+				return
+			}
+			err = g.db.Transaction(func(tx *db.NodeTx) error {
+				return tx.RaftNodesReplace(nodes)
+			})
+			if err != nil {
+				http.Error(w, "500 failed to update raft nodes", http.StatusInternalServerError)
+				return
+			}
+			return
+		}
+
 		g.server.ServeHTTP(w, r)
 	}
 	raft := func(w http.ResponseWriter, r *http.Request) {
@@ -128,7 +147,7 @@ func (g *Gateway) Dialer() grpcsql.Dialer {
 		}
 
 		// Network connection.
-		addresses, err := g.raftNodes()
+		addresses, err := g.cachedRaftNodes()
 		if err != nil {
 			return nil, err
 		}
@@ -208,8 +227,40 @@ func (g *Gateway) waitLeadership() error {
 	return fmt.Errorf("raft node did not self-elect within 5 seconds")
 }
 
-// Return the addresses of the current raft nodes.
-func (g *Gateway) raftNodes() ([]string, error) {
+// Return information about the LXD nodes that are currently part of the raft
+// cluster, as configured in the raft log. It returns an error if this node is
+// not the leader.
+func (g *Gateway) currentRaftNodes() ([]db.RaftNode, error) {
+	if g.raft == nil {
+		return nil, raft.ErrNotLeader
+	}
+	servers, err := g.raft.Servers()
+	if err != nil {
+		return nil, err
+	}
+	provider := raftAddressProvider{db: g.db}
+	nodes := make([]db.RaftNode, len(servers))
+	for i, server := range servers {
+		address, err := provider.ServerAddr(server.ID)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to fetch raft server address")
+		}
+		id, err := strconv.Atoi(string(server.ID))
+		if err != nil {
+			return nil, errors.Wrap(err, "non-numeric server ID")
+		}
+		nodes[i].ID = int64(id)
+		nodes[i].Address = string(address)
+	}
+	return nodes, nil
+}
+
+// Return the addresses of the raft nodes as stored in the node-level
+// database.
+//
+// These values might lag behind the actual values, and are refreshed
+// periodically during heartbeats.
+func (g *Gateway) cachedRaftNodes() ([]string, error) {
 	var addresses []string
 	err := g.db.Transaction(func(tx *db.NodeTx) error {
 		var err error
diff --git a/lxd/cluster/gateway_export_test.go b/lxd/cluster/gateway_export_test.go
new file mode 100644
index 000000000..6592158db
--- /dev/null
+++ b/lxd/cluster/gateway_export_test.go
@@ -0,0 +1,22 @@
+package cluster
+
+import (
+	"github.com/hashicorp/raft"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/shared"
+)
+
+// Raft returns the gateway's internal raft instance.
+func (g *Gateway) Raft() *raft.Raft {
+	return g.raft.raft
+}
+
+// Cert returns the gateway's internal TLS certificate information.
+func (g *Gateway) Cert() *shared.CertInfo {
+	return g.cert
+}
+
+// RaftNodes returns the nodes currently part of the raft cluster.
+func (g *Gateway) RaftNodes() ([]db.RaftNode, error) {
+	return g.currentRaftNodes()
+}
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index d2e5afe69..10536978b 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/hashicorp/raft"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
@@ -105,6 +106,27 @@ func TestGateway_NetworkAuth(t *testing.T) {
 
 }
 
+// RaftNodes returns an error if the underlying raft instance is not the leader.
+func TestGateway_RaftNodesNotLeader(t *testing.T) {
+	db, cleanup := db.NewTestNode(t)
+	defer cleanup()
+
+	cert := shared.TestingKeyPair()
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	defer server.Close()
+
+	address := server.Listener.Addr().String()
+	setRaftRole(t, db, address)
+
+	gateway := newGateway(t, db, cert)
+	defer gateway.Shutdown()
+
+	// Get the nodes immediately, before the election has taken place.
+	_, err := gateway.RaftNodes()
+	assert.Equal(t, raft.ErrNotLeader, err)
+}
+
 // Create a new test Gateway with the given parameters, and ensure no error
 // happens.
 func newGateway(t *testing.T, db *db.Node, certInfo *shared.CertInfo) *cluster.Gateway {
diff --git a/lxd/cluster/heartbeat.go b/lxd/cluster/heartbeat.go
new file mode 100644
index 000000000..a3bf3c99e
--- /dev/null
+++ b/lxd/cluster/heartbeat.go
@@ -0,0 +1,113 @@
+package cluster
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/raft"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/task"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+// Heartbeat returns a task function that performs leader-initiated heartbeat
+// checks against all LXD nodes in the cluster.
+//
+// It will update the heartbeat timestamp column of the nodes table
+// accordingly, and also notify them of the current list of database nodes.
+func Heartbeat(gateway *Gateway, cluster *db.Cluster) task.Func {
+	heartbeat := func(ctx context.Context) {
+		if gateway.server == nil || gateway.memoryDial != nil {
+			// We're not a raft node or we're not clustered
+			return
+		}
+
+		raftNodes, err := gateway.currentRaftNodes()
+		if err == raft.ErrNotLeader {
+			return
+		}
+		if err != nil {
+			logger.Warnf("Failed to get current raft nodes: %v", err)
+			return
+		}
+		var nodes []db.NodeInfo
+		err = cluster.Transaction(func(tx *db.ClusterTx) error {
+			var err error
+			nodes, err = tx.Nodes()
+			return err
+		})
+		wg := sync.WaitGroup{}
+		wg.Add(len(nodes))
+		heartbeats := make([]time.Time, len(nodes))
+		for i, node := range nodes {
+			go func(i int, address string) {
+				defer wg.Done()
+				err := heartbeatNode(ctx, address, gateway.cert, raftNodes)
+				if err == nil {
+					heartbeats[i] = time.Now()
+				}
+			}(i, node.Address)
+		}
+		wg.Wait()
+
+		// If the context has been cancelled, return immediately.
+		if ctx.Err() != nil {
+			return
+		}
+
+		err = cluster.Transaction(func(tx *db.ClusterTx) error {
+			for i, node := range nodes {
+				if heartbeats[i].Equal(time.Time{}) {
+					continue
+				}
+				err := tx.NodeHeartbeat(node.Address, heartbeats[i])
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			logger.Warnf("Failed to update heartbeat: %v", err)
+		}
+	}
+	return heartbeat
+}
+
+// Perform a single heartbeat request against the node with the given address.
+func heartbeatNode(ctx context.Context, address string, cert *shared.CertInfo, raftNodes []db.RaftNode) error {
+	config, err := tlsClientConfig(cert)
+	if err != nil {
+		return err
+	}
+	url := fmt.Sprintf("https://%s%s", address, grpcEndpoint)
+	client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
+
+	buffer := bytes.Buffer{}
+	err = json.NewEncoder(&buffer).Encode(raftNodes)
+	if err != nil {
+		return err
+	}
+
+	request, err := http.NewRequest("PUT", url, bytes.NewReader(buffer.Bytes()))
+	if err != nil {
+		return err
+	}
+	request = request.WithContext(ctx)
+
+	response, err := client.Do(request)
+	if err != nil {
+		return errors.Wrap(err, "failed to send HTTP request")
+	}
+	if response.StatusCode != http.StatusOK {
+		return fmt.Errorf("HTTP request failed: %s", response.Status)
+	}
+	return nil
+}
diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
new file mode 100644
index 000000000..7b8bf91ff
--- /dev/null
+++ b/lxd/cluster/heartbeat_test.go
@@ -0,0 +1,197 @@
+package cluster_test
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/state"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/version"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/net/context"
+)
+
+// After a heartbeat request is completed, the leader updates the heartbeat
+// timestamp column, and the serving node updates its cache of raft nodes.
+func TestHeartbeat(t *testing.T) {
+	f := heartbeatFixture{t: t}
+	defer f.Cleanup()
+
+	gateway0 := f.Bootstrap()
+	gateway1 := f.Grow()
+	f.Grow()
+
+	state0 := f.State(gateway0)
+	state1 := f.State(gateway1)
+
+	// Artificially mark all nodes as down
+	err := state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		for _, node := range nodes {
+			err := tx.NodeHeartbeat(node.Address, time.Now().Add(-time.Minute))
+			require.NoError(t, err)
+		}
+		return nil
+	})
+	require.NoError(t, err)
+
+	// Perform the heartbeat requests.
+	heartbeat := cluster.Heartbeat(gateway0, state0.Cluster)
+	ctx := context.Background()
+	heartbeat(ctx)
+
+	// The second node that initially did not know about the third, now
+	// does.
+	err = state1.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		require.NoError(t, err)
+		assert.Len(t, nodes, 3)
+		return nil
+	})
+	require.NoError(t, err)
+
+	// The heartbeat timestamps of all nodes got updated
+	err = state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		for _, node := range nodes {
+			assert.False(t, node.IsDown())
+		}
+		return nil
+	})
+	require.NoError(t, err)
+}
+
+// If a certain node does not successfully respond to the heartbeat, its
+// timestamp does not get updated.
+func TestHeartbeat_MarkAsDown(t *testing.T) {
+	f := heartbeatFixture{t: t}
+	defer f.Cleanup()
+
+	gateway0 := f.Bootstrap()
+	gateway1 := f.Grow()
+
+	state0 := f.State(gateway0)
+
+	// Artificially mark all nodes as down
+	err := state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		for _, node := range nodes {
+			err := tx.NodeHeartbeat(node.Address, time.Now().Add(-time.Minute))
+			require.NoError(t, err)
+		}
+		return nil
+	})
+	require.NoError(t, err)
+
+	// Shutdown the second node and perform the heartbeat requests.
+	f.Server(gateway1).Close()
+	heartbeat := cluster.Heartbeat(gateway0, state0.Cluster)
+	ctx := context.Background()
+	heartbeat(ctx)
+
+	// The heartbeat timestamp of the second node did not get updated
+	err = state0.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		assert.True(t, nodes[1].IsDown())
+		return nil
+	})
+	require.NoError(t, err)
+}
+
+// Helper for testing heartbeat-related code.
+type heartbeatFixture struct {
+	t        *testing.T
+	gateways map[int]*cluster.Gateway              // node index to gateway
+	states   map[*cluster.Gateway]*state.State     // gateway to its state handle
+	servers  map[*cluster.Gateway]*httptest.Server // gateway to its HTTP server
+	cleanups []func()
+}
+
+// Bootstrap the first node of the cluster.
+func (f *heartbeatFixture) Bootstrap() *cluster.Gateway {
+	state, gateway, _ := f.node()
+
+	err := cluster.Bootstrap(state, gateway, "buzz")
+	require.NoError(f.t, err)
+
+	return gateway
+}
+
+// Grow adds a new node to the cluster.
+func (f *heartbeatFixture) Grow() *cluster.Gateway {
+	state, gateway, address := f.node()
+	name := address
+
+	target := f.gateways[0]
+	targetState := f.states[target]
+
+	nodes, err := cluster.Accept(
+		targetState, name, address, cluster.SchemaVersion, len(version.APIExtensions))
+
+	err = cluster.Join(state, gateway, target.Cert(), name, nodes)
+	require.NoError(f.t, err)
+
+	return gateway
+}
+
+// Return the state associated with the given gateway.
+func (f *heartbeatFixture) State(gateway *cluster.Gateway) *state.State {
+	return f.states[gateway]
+}
+
+// Return the HTTP server associated with the given gateway.
+func (f *heartbeatFixture) Server(gateway *cluster.Gateway) *httptest.Server {
+	return f.servers[gateway]
+}
+
+// Creates a new node, without either bootstrapping or joining it.
+//
+// Return the associated gateway and network address.
+func (f *heartbeatFixture) node() (*state.State, *cluster.Gateway, string) {
+	if f.gateways == nil {
+		f.gateways = make(map[int]*cluster.Gateway)
+		f.states = make(map[*cluster.Gateway]*state.State)
+		f.servers = make(map[*cluster.Gateway]*httptest.Server)
+	}
+
+	state, cleanup := state.NewTestState(f.t)
+	f.cleanups = append(f.cleanups, cleanup)
+
+	cert := shared.TestingKeyPair()
+	gateway := newGateway(f.t, state.Node, cert)
+	f.cleanups = append(f.cleanups, func() { gateway.Shutdown() })
+
+	mux := http.NewServeMux()
+	server := newServer(cert, mux)
+	f.cleanups = append(f.cleanups, server.Close)
+
+	for path, handler := range gateway.HandlerFuncs() {
+		mux.HandleFunc(path, handler)
+	}
+
+	address := server.Listener.Addr().String()
+	mf := &membershipFixtures{t: f.t, state: state}
+	mf.NetworkAddress(address)
+
+	f.gateways[len(f.gateways)] = gateway
+	f.states[gateway] = state
+	f.servers[gateway] = server
+
+	return state, gateway, address
+}
+
+func (f *heartbeatFixture) Cleanup() {
+	// Run the cleanups in reverse order
+	for i := len(f.cleanups) - 1; i >= 0; i-- {
+		f.cleanups[i]()
+	}
+}
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 70e3ad224..83b8a5576 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -292,6 +292,15 @@ func TestJoin(t *testing.T) {
 	// Actually join the cluster.
 	err = cluster.Join(state, gateway, targetCert, "rusp", nodes)
 	require.NoError(t, err)
+
+	// The leader now returns an updated list of raft nodes.
+	nodes, err = targetGateway.RaftNodes()
+	require.NoError(t, err)
+	assert.Len(t, nodes, 2)
+	assert.Equal(t, int64(1), nodes[0].ID)
+	assert.Equal(t, targetAddress, nodes[0].Address)
+	assert.Equal(t, int64(2), nodes[1].ID)
+	assert.Equal(t, address, nodes[1].Address)
 }
 
 // Helper for setting fixtures for Bootstrap tests.
diff --git a/lxd/cluster/raft.go b/lxd/cluster/raft.go
index 20c3ac9f9..ebdbb1efd 100644
--- a/lxd/cluster/raft.go
+++ b/lxd/cluster/raft.go
@@ -207,6 +207,22 @@ func (i *raftInstance) Raft() *raft.Raft {
 	return i.raft
 }
 
+// Servers returns the servers that are currently part of the cluster.
+//
+// If this raft instance is not the leader, an error is returned.
+func (i *raftInstance) Servers() ([]raft.Server, error) {
+	if i.raft.State() != raft.Leader {
+		return nil, raft.ErrNotLeader
+	}
+	future := i.raft.GetConfiguration()
+	err := future.Error()
+	if err != nil {
+		return nil, err
+	}
+	configuration := future.Configuration()
+	return configuration.Servers, nil
+}
+
 // HandlerFunc can be used to handle HTTP requests performed against the LXD
 // API RaftEndpoint ("/internal/raft"), in order to join/leave/form the raft
 // cluster.

From bcd0359c5d0dbe2fecebad02f8e0b285295a98d6 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 20:56:47 +0000
Subject: [PATCH 047/116] Plug heartbeat logic into the Daemon

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster_test.go       | 18 ------------------
 lxd/cluster/gateway.go        | 15 +++++++++------
 lxd/cluster/heartbeat.go      | 11 +++++++++--
 lxd/cluster/heartbeat_test.go |  4 ++--
 lxd/daemon.go                 | 32 +++++++++++++++++---------------
 5 files changed, 37 insertions(+), 43 deletions(-)

diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 858f2433a..27c4ac162 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -109,24 +109,6 @@ func TestCluster_Failover(t *testing.T) {
 	f := clusterFixture{t: t}
 	f.FormCluster(daemons)
 
-	// FIXME: here we manually update the raft_nodes table, this can be
-	//        removed when automatic database nodes updating is in place.
-	var nodes []db.RaftNode
-	state := daemons[0].State()
-	err := state.Node.Transaction(func(tx *db.NodeTx) error {
-		var err error
-		nodes, err = tx.RaftNodes()
-		return err
-	})
-	require.NoError(t, err)
-	for _, daemon := range daemons[1:] {
-		state := daemon.State()
-		err := state.Node.Transaction(func(tx *db.NodeTx) error {
-			return tx.RaftNodesReplace(nodes)
-		})
-		require.NoError(t, err)
-	}
-
 	require.NoError(t, daemons[0].Stop())
 
 	for i, daemon := range daemons[1:] {
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 826b11aec..243ec27b4 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -146,23 +146,26 @@ func (g *Gateway) Dialer() grpcsql.Dialer {
 			return g.memoryDial()
 		}
 
-		// Network connection.
-		addresses, err := g.cachedRaftNodes()
-		if err != nil {
-			return nil, err
-		}
-
 		// FIXME: timeout should be configurable
+		var err error
 		remaining := 10 * time.Second
 		for remaining > 0 {
+			// Network connection.
+			addresses, dbErr := g.cachedRaftNodes()
+			if dbErr != nil {
+				return nil, dbErr
+			}
+
 			for _, address := range addresses {
 				var conn *grpc.ClientConn
 				conn, err = grpcNetworkDial(address, g.cert, time.Second)
 				if err == nil {
 					return conn, nil
 				}
+				logger.Debugf("Failed to establish gRPC connection with %s: %v", address, err)
 			}
 			time.Sleep(250 * time.Millisecond)
+			remaining -= 250 * time.Millisecond
 		}
 		return nil, err
 	}
diff --git a/lxd/cluster/heartbeat.go b/lxd/cluster/heartbeat.go
index a3bf3c99e..150bff51b 100644
--- a/lxd/cluster/heartbeat.go
+++ b/lxd/cluster/heartbeat.go
@@ -22,12 +22,13 @@ import (
 //
 // It will update the heartbeat timestamp column of the nodes table
 // accordingly, and also notify them of the current list of database nodes.
-func Heartbeat(gateway *Gateway, cluster *db.Cluster) task.Func {
+func Heartbeat(gateway *Gateway, cluster *db.Cluster) (task.Func, task.Schedule) {
 	heartbeat := func(ctx context.Context) {
 		if gateway.server == nil || gateway.memoryDial != nil {
 			// We're not a raft node or we're not clustered
 			return
 		}
+		logger.Debugf("Starting heartbeat round")
 
 		raftNodes, err := gateway.currentRaftNodes()
 		if err == raft.ErrNotLeader {
@@ -51,7 +52,10 @@ func Heartbeat(gateway *Gateway, cluster *db.Cluster) task.Func {
 				defer wg.Done()
 				err := heartbeatNode(ctx, address, gateway.cert, raftNodes)
 				if err == nil {
+					logger.Debugf("Successful heartbeat for %s", address)
 					heartbeats[i] = time.Now()
+				} else {
+					logger.Debugf("Failed heartbeat for %s: %v", address, err)
 				}
 			}(i, node.Address)
 		}
@@ -78,7 +82,10 @@ func Heartbeat(gateway *Gateway, cluster *db.Cluster) task.Func {
 			logger.Warnf("Failed to update heartbeat: %v", err)
 		}
 	}
-	return heartbeat
+
+	schedule := task.Every(3 * time.Second)
+
+	return heartbeat, schedule
 }
 
 // Perform a single heartbeat request against the node with the given address.
diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index 7b8bf91ff..d129264d5 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -42,7 +42,7 @@ func TestHeartbeat(t *testing.T) {
 	require.NoError(t, err)
 
 	// Perform the heartbeat requests.
-	heartbeat := cluster.Heartbeat(gateway0, state0.Cluster)
+	heartbeat, _ := cluster.Heartbeat(gateway0, state0.Cluster)
 	ctx := context.Background()
 	heartbeat(ctx)
 
@@ -93,7 +93,7 @@ func TestHeartbeat_MarkAsDown(t *testing.T) {
 
 	// Shutdown the second node and perform the heartbeat requests.
 	f.Server(gateway1).Close()
-	heartbeat := cluster.Heartbeat(gateway0, state0.Cluster)
+	heartbeat, _ := cluster.Heartbeat(gateway0, state0.Cluster)
 	ctx := context.Background()
 	heartbeat(ctx)
 
diff --git a/lxd/daemon.go b/lxd/daemon.go
index b5d580f93..6da4444b9 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -498,25 +498,27 @@ func (d *Daemon) init() error {
 }
 
 func (d *Daemon) Ready() error {
-	/* Prune images */
-	d.taskPruneImages = d.tasks.Add(pruneExpiredImagesTask(d))
-
-	/* Auto-update images */
-	d.taskAutoUpdate = d.tasks.Add(autoUpdateImagesTask(d))
+	/* Heartbeats */
+	d.tasks.Add(cluster.Heartbeat(d.gateway, d.cluster))
+
+	// FIXME: There's no hard reason for which we should not run these
+	//        tasks in mock mode. However it requires that we tweak them so
+	//        they exit gracefully without blocking (something we should do
+	//        anyways) and they don't hit the internet or similar. Support
+	//        for proper cancellation is something that has been started
+	//        but has not been fully completed.
+	if !d.os.MockMode {
+		d.taskPruneImages = d.tasks.Add(pruneExpiredImagesTask(d))
 
-	/* Auto-update instance types */
-	d.tasks.Add(instanceRefreshTypesTask(d))
+		/* Auto-update images */
+		d.taskAutoUpdate = d.tasks.Add(autoUpdateImagesTask(d))
 
-	// FIXME: There's no hard reason for which we should not run tasks in
-	//        mock mode. However it requires that we tweak the tasks so
-	//        they exit gracefully without blocking (something we should
-	//        do anyways) and they don't hit the internet or similar. Support
-	//        for proper cancellation is something that has been started but
-	//        has not been fully completed.
-	if !d.os.MockMode {
-		d.tasks.Start()
+		/* Auto-update instance types */
+		d.tasks.Add(instanceRefreshTypesTask(d))
 	}
 
+	d.tasks.Start()
+
 	s := d.State()
 
 	/* Restore containers */

From 6701ed9eabc9bfc27ee9bc4cdf654d11a4d55b85 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 20 Oct 2017 21:34:02 +0000
Subject: [PATCH 048/116] Add test for joining a cluster interactively with lxd
 init

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/main_init_test.go | 40 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 39 insertions(+), 1 deletion(-)

diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 66e37810d..13af4cf48 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -153,6 +153,36 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClustering() {
 	suite.Req.True(shared.PathExists(certfile))
 }
 
+// Enable clustering interactively, joining an existing cluster.
+func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
+	leader, cleanup := newDaemon(suite.T())
+	defer cleanup()
+
+	f := clusterFixture{t: suite.T()}
+	f.FormCluster([]*Daemon{leader})
+
+	suite.command.PasswordReader = func(int) ([]byte, error) {
+		return []byte("sekret"), nil
+	}
+	port, err := shared.AllocatePort()
+	suite.Req.Nil(err)
+	answers := &cmdInitAnswers{
+		WantClustering:           true,
+		ClusterName:              "rusp",
+		ClusterAddress:           fmt.Sprintf("127.0.0.1:%d", port),
+		WantJoinCluster:          true,
+		ClusterTargetNodeAddress: leader.endpoints.NetworkAddress(),
+		ClusterAcceptFingerprint: true,
+		ClusterConfirmLosingData: true,
+	}
+	answers.Render(suite.streams)
+
+	suite.Req.Nil(suite.command.Run())
+	state := suite.d.State()
+	certfile := filepath.Join(state.OS.VarDir, "cluster.crt")
+	suite.Req.True(shared.PathExists(certfile))
+}
+
 // Pass network address and trust password via command line arguments.
 func (suite *cmdInitTestSuite) TestCmdInit_AutoHTTPSAddressAndTrustPassword() {
 	port, err := shared.AllocatePort()
@@ -716,9 +746,12 @@ func (suite *cmdInitTestSuite) TestCmdInit_ProfilesPreseedUpdate() {
 // sequence of answers.
 type cmdInitAnswers struct {
 	WantClustering           bool
-	WantJoinCluster          bool
 	ClusterName              string
 	ClusterAddress           string
+	WantJoinCluster          bool
+	ClusterTargetNodeAddress string
+	ClusterAcceptFingerprint bool
+	ClusterConfirmLosingData bool
 	WantStoragePool          bool
 	WantAvailableOverNetwork bool
 	BindToAddress            string
@@ -738,6 +771,11 @@ func (answers *cmdInitAnswers) Render(streams *cmd.MemoryStreams) {
 		streams.InputAppendLine(answers.ClusterName)
 		streams.InputAppendLine(answers.ClusterAddress)
 		streams.InputAppendBoolAnswer(answers.WantJoinCluster)
+		if answers.WantJoinCluster {
+			streams.InputAppendLine(answers.ClusterTargetNodeAddress)
+			streams.InputAppendBoolAnswer(answers.ClusterAcceptFingerprint)
+			streams.InputAppendBoolAnswer(answers.ClusterConfirmLosingData)
+		}
 	}
 	streams.InputAppendBoolAnswer(answers.WantStoragePool)
 	if !answers.WantClustering {

From 5052316b653a97122aad9301bba978b730dbba47 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Sat, 21 Oct 2017 09:04:31 +0000
Subject: [PATCH 049/116] Notify other nodes of config changes received via
 REST API

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go           | 61 +++++++++++++++++++++++++++++++++++++++++-------
 lxd/api_cluster_test.go  | 14 +++++++++++
 lxd/daemon.go            |  8 ++++++-
 lxd/db/db.go             |  1 +
 lxd/endpoints/network.go |  8 +++++++
 5 files changed, 82 insertions(+), 10 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 4486ce5fa..36b118c3a 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -6,6 +6,7 @@ import (
 
 	"gopkg.in/lxc/go-lxc.v2"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/config"
 	"github.com/lxc/lxd/lxd/db"
@@ -173,20 +174,34 @@ func api10Get(d *Daemon, r *http.Request) Response {
 }
 
 func api10Put(d *Daemon, r *http.Request) Response {
+	req := api.ServerPut{}
+	if err := shared.ReadToJSON(r.Body, &req); err != nil {
+		return BadRequest(err)
+	}
+
+	// If this is a notification from a cluster node, just run the triggers
+	// for reacting to the values that changed.
+	if r.Header.Get("User-Agent") == "lxd-cluster-notifier" {
+		changed := make(map[string]string)
+		for key, value := range req.Config {
+			changed[key] = value.(string)
+		}
+		err := doApi10UpdateTriggers(d, changed)
+		if err != nil {
+			return SmartError(err)
+		}
+		return EmptySyncResponse
+	}
+
 	render, err := daemonConfigRender(d.State())
 	if err != nil {
-		return InternalError(err)
+		return SmartError(err)
 	}
 	err = util.EtagCheck(r, render)
 	if err != nil {
 		return PreconditionFailed(err)
 	}
 
-	req := api.ServerPut{}
-	if err := shared.ReadToJSON(r.Body, &req); err != nil {
-		return BadRequest(err)
-	}
-
 	return doApi10Update(d, req, false)
 }
 
@@ -266,6 +281,35 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 		}
 	}
 
+	notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAlive)
+	if err != nil {
+		return SmartError(err)
+	}
+	err = notifier(func(client lxd.ContainerServer) error {
+		server, etag, err := client.GetServer()
+		if err != nil {
+			return err
+		}
+		serverPut := server.Writable()
+		serverPut.Config = make(map[string]interface{})
+		for key, value := range changed {
+			serverPut.Config[key] = value
+		}
+		return client.UpdateServer(serverPut, etag)
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
+	err = doApi10UpdateTriggers(d, changed)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	return EmptySyncResponse
+}
+
+func doApi10UpdateTriggers(d *Daemon, changed map[string]string) error {
 	for key, value := range changed {
 		switch key {
 		case "core.proxy_http":
@@ -277,7 +321,7 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 		case "core.macaroon.endpoint":
 			err := d.setupExternalAuthentication(value)
 			if err != nil {
-				return SmartError(err)
+				return err
 			}
 		case "images.auto_update_interval":
 			d.taskAutoUpdate.Reset()
@@ -285,8 +329,7 @@ func doApi10Update(d *Daemon, req api.ServerPut, patch bool) Response {
 			d.taskPruneImages.Reset()
 		}
 	}
-
-	return EmptySyncResponse
+	return nil
 }
 
 var api10Cmd = Command{name: "", untrustedGet: true, get: api10Get, put: api10Put, patch: api10Patch}
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 27c4ac162..02f5e7fc4 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -68,6 +68,18 @@ func TestCluster_Join(t *testing.T) {
 		return nil
 	})
 	require.NoError(t, err)
+
+	// Changing the configuration on the second node also updates it on the
+	// first, via internal notifications.
+	server, _, err := client.GetServer()
+	require.NoError(t, err)
+	serverPut := server.Writable()
+	serverPut.Config["core.macaroon.endpoint"] = "foo.bar"
+	require.NoError(t, client.UpdateServer(serverPut, ""))
+
+	for _, daemon := range daemons {
+		assert.NotNil(t, daemon.externalAuth)
+	}
 }
 
 // If the wrong trust password is given, the join request fails.
@@ -112,12 +124,14 @@ func TestCluster_Failover(t *testing.T) {
 	require.NoError(t, daemons[0].Stop())
 
 	for i, daemon := range daemons[1:] {
+		t.Logf("Invoking GetServer API against daemon %d", i)
 		client := f.ClientUnix(daemon)
 		server, _, err := client.GetServer()
 		require.NoError(f.t, err)
 		serverPut := server.Writable()
 		serverPut.Config["core.trust_password"] = fmt.Sprintf("sekret-%d", i)
 
+		t.Logf("Invoking UpdateServer API against daemon %d", i)
 		require.NoError(f.t, client.UpdateServer(serverPut, ""))
 	}
 }
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 6da4444b9..183623690 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -137,11 +137,17 @@ func (d *Daemon) checkTrustedClient(r *http.Request) error {
 		return err
 	}
 
+	// Add the server or cluster certificate to the list of trusted ones.
+	cert, _ := x509.ParseCertificate(d.endpoints.NetworkCert().KeyPair().Certificate[0])
+	certs := d.clientCerts
+	certs = append(certs, *cert)
+
 	for i := range r.TLS.PeerCertificates {
-		if util.CheckTrustState(*r.TLS.PeerCertificates[i], d.clientCerts) {
+		if util.CheckTrustState(*r.TLS.PeerCertificates[i], certs) {
 			return nil
 		}
 	}
+
 	return fmt.Errorf("unauthorized")
 }
 
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 420257762..e21c429aa 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -195,6 +195,7 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 			// FIXME: we should bubble errors using errors.Wrap()
 			// instead, and check for sql.ErrBadConnection.
 			if strings.Contains(err.Error(), "bad connection") {
+				logger.Debugf("Retry failed transaction")
 				time.Sleep(time.Second)
 				continue
 			}
diff --git a/lxd/endpoints/network.go b/lxd/endpoints/network.go
index 6d6ddb42d..b965c50b3 100644
--- a/lxd/endpoints/network.go
+++ b/lxd/endpoints/network.go
@@ -31,6 +31,14 @@ func (e *Endpoints) NetworkPrivateKey() []byte {
 	return e.cert.PrivateKey()
 }
 
+// NetworkCert returns the full TLS certificate information for this endpoint.
+func (e *Endpoints) NetworkCert() *shared.CertInfo {
+	e.mu.RLock()
+	defer e.mu.RUnlock()
+
+	return e.cert
+}
+
 // NetworkAddress returns the network addresss of the network endpoint, or an
 // empty string if there's no network endpoint
 func (e *Endpoints) NetworkAddress() string {

From f6a7fc59fcbd8a1f80e5b977d649b74808c6a3d4 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 23 Oct 2017 11:09:05 +0000
Subject: [PATCH 050/116] Add networks and networks_config table to cluster db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/open.go        |  2 +-
 lxd/db/cluster/schema.go      | 14 +++++++++++
 lxd/db/cluster/update.go      | 14 +++++++++++
 lxd/db/cluster/update_test.go | 57 ++++++++++++++++++++++++++++++++++++++++---
 lxd/db/schema/schema.go       |  2 +-
 5 files changed, 83 insertions(+), 6 deletions(-)

diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index f9b3139e7..03fe5ece9 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -29,7 +29,7 @@ func Open(name string, dialer grpcsql.Dialer) (*sql.DB, error) {
 	if name == "" {
 		name = "db.bin"
 	}
-	db, err := sql.Open(driverName, name)
+	db, err := sql.Open(driverName, name+"?_foreign_keys=1")
 	if err != nil {
 		return nil, fmt.Errorf("cannot open cluster database: %v", err)
 	}
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 76302fbf7..3354e36b2 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -12,6 +12,20 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
+CREATE TABLE networks (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE networks_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (network_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+);
 CREATE TABLE nodes (
     id INTEGER PRIMARY KEY,
     name TEXT NOT NULL,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 33006db06..967ac40f4 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -36,6 +36,20 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
+CREATE TABLE networks (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE networks_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    network_id INTEGER NOT NULL,
+    key VARCHAR(255) NOT NULL,
+    value TEXT,
+    UNIQUE (network_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+);
 `
 	_, err := tx.Exec(stmt)
 	return err
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index f637f5083..f56f63e11 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"github.com/lxc/lxd/lxd/db/cluster"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -13,19 +14,22 @@ func TestUpdateFromV0(t *testing.T) {
 	db, err := schema.ExerciseUpdate(1, nil)
 	require.NoError(t, err)
 
-	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'foo', 'blah', '1.2.3.4:666', 1, 32, ?)", time.Now())
+	stmt := "INSERT INTO nodes VALUES (1, 'foo', 'blah', '1.2.3.4:666', 1, 32, ?)"
+	_, err = db.Exec(stmt, time.Now())
 	require.NoError(t, err)
 
 	// Unique constraint on name
-	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'foo', 'gosh', '5.6.7.8:666', 5, 20, ?)", time.Now())
+	stmt = "INSERT INTO nodes VALUES (2, 'foo', 'gosh', '5.6.7.8:666', 5, 20, ?)"
+	_, err = db.Exec(stmt, time.Now())
 	require.Error(t, err)
 
 	// Unique constraint on address
-	_, err = db.Exec("INSERT INTO nodes VALUES (3, 'bar', 'gasp', '1.2.3.4:666', 9, 11), ?)", time.Now())
+stmt = "INSERT INTO nodes VALUES (3, 'bar', 'gasp', '1.2.3.4:666', 9, 11, ?)"
+	_, err = db.Exec(stmt, time.Now())
 	require.Error(t, err)
 }
 
-func TestUpdateFromV1(t *testing.T) {
+func TestUpdateFromV1_Config(t *testing.T) {
 	schema := cluster.Schema()
 	db, err := schema.ExerciseUpdate(2, nil)
 	require.NoError(t, err)
@@ -37,3 +41,48 @@ func TestUpdateFromV1(t *testing.T) {
 	_, err = db.Exec("INSERT INTO config VALUES (2, 'foo', 'gosh')")
 	require.Error(t, err)
 }
+
+func TestUpdateFromV1_Network(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah')")
+	require.NoError(t, err)
+
+	// Unique constraint on name.
+	_, err = db.Exec("INSERT INTO networks VALUES (2, 'foo', 'gosh')")
+	require.Error(t, err)
+}
+
+func TestUpdateFromV1_NetworkConfig(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah')")
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO networks_config VALUES (1, 1, 'bar', 'baz')")
+	require.NoError(t, err)
+
+	// Unique constraint on network_id/key.
+	_, err = db.Exec("INSERT INTO networks_config VALUES (2, 1, 'bar', 'egg')")
+	require.Error(t, err)
+
+	// Reference constraint on network_id.
+	_, err = db.Exec("INSERT INTO networks_config VALUES (3, 2, 'fuz', 'buz')")
+	require.Error(t, err)
+
+	// Cascade deletes
+	result, err := db.Exec("DELETE FROM networks")
+	require.NoError(t, err)
+	n, err := result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), n)
+	result, err = db.Exec("DELETE FROM networks_config")
+	require.NoError(t, err)
+	n, err = result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), n) // The row was already deleted by the previous query
+}
diff --git a/lxd/db/schema/schema.go b/lxd/db/schema/schema.go
index ba798984c..7015977c4 100644
--- a/lxd/db/schema/schema.go
+++ b/lxd/db/schema/schema.go
@@ -226,7 +226,7 @@ func (s *Schema) Trim(version int) []Update {
 // inspection of the resulting state.
 func (s *Schema) ExerciseUpdate(version int, hook func(*sql.DB)) (*sql.DB, error) {
 	// Create an in-memory database.
-	db, err := sql.Open("sqlite3", ":memory:")
+	db, err := sql.Open("sqlite3", ":memory:?_foreign_keys=1")
 	if err != nil {
 		return nil, fmt.Errorf("failed to open memory database: %v", err)
 	}

From 97b9e61bfb2db40bd23e28fd7943e9912309cc64 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 23 Oct 2017 12:29:33 +0000
Subject: [PATCH 051/116] Migrate networks data from node to cluster database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/migration.go            | 22 ++++++++++++++++-----
 lxd/db/migration_test.go       | 22 ++++++++++++++++++++-
 lxd/db/networks.go             | 44 +++++++++++++++++++++---------------------
 lxd/db/node/schema.go          | 14 --------------
 lxd/db/node/update.go          |  3 +++
 lxd/db/node/update_test.go     | 30 +++++++++++++++++++++++++++-
 lxd/devices.go                 |  2 +-
 lxd/networks.go                | 26 ++++++++++++-------------
 lxd/networks_utils.go          | 10 +++++-----
 lxd/patches.go                 |  4 ++--
 test/includes/lxd.sh           |  8 ++++++--
 test/suites/database_update.sh |  4 ++--
 12 files changed, 121 insertions(+), 68 deletions(-)

diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index af9284d10..737d3b11b 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -18,14 +18,11 @@ import (
 // (regardless of whether clustering is actually on or off).
 func LoadPreClusteringData(tx *sql.Tx) (*Dump, error) {
 	// Dump all tables.
-	tables := []string{
-		"config",
-	}
 	dump := &Dump{
 		Schema: map[string][]string{},
 		Data:   map[string][][]interface{}{},
 	}
-	for _, table := range tables {
+	for _, table := range preClusteringTables {
 		data := [][]interface{}{}
 		stmt := fmt.Sprintf("SELECT * FROM %s", table)
 		rows, err := tx.Query(stmt)
@@ -68,10 +65,19 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 		return errors.Wrap(err, "failed to start cluster database transaction")
 	}
 
-	for table, columns := range dump.Schema {
+	for _, table := range preClusteringTables {
+		columns := dump.Schema[table]
 		stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
 		stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
 		for i, row := range dump.Data[table] {
+			for i, element := range row {
+				// Convert []byte columns to string. This is safe to do since
+				// the pre-clustering schema only had TEXT fields and no BLOB.
+				bytes, ok := element.([]byte)
+				if ok {
+					row[i] = string(bytes)
+				}
+			}
 			result, err := tx.Exec(stmt, row...)
 			if err != nil {
 				return errors.Wrapf(err, "failed to insert row %d into %s", i, table)
@@ -99,3 +105,9 @@ type Dump struct {
 	// of interfaces.
 	Data map[string][][]interface{}
 }
+
+var preClusteringTables = []string{
+	"config",
+	"networks",
+	"networks_config",
+}
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 0719a8f61..ac201ace4 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -15,10 +15,17 @@ func TestLoadPreClusteringData(t *testing.T) {
 	dump, err := db.LoadPreClusteringData(tx)
 	require.NoError(t, err)
 
+	// config
 	assert.Equal(t, []string{"id", "key", "value"}, dump.Schema["config"])
 	assert.Len(t, dump.Data["config"], 1)
 	rows := []interface{}{int64(1), []byte("core.https_address"), []byte("1.2.3.4:666")}
 	assert.Equal(t, rows, dump.Data["config"][0])
+
+	// networks
+	assert.Equal(t, []string{"id", "name", "description"}, dump.Schema["networks"])
+	assert.Len(t, dump.Data["networks"], 1)
+	rows = []interface{}{int64(1), []byte("lxcbr0"), []byte("LXD bridge")}
+	assert.Equal(t, rows, dump.Data["networks"][0])
 }
 
 func TestImportPreClusteringData(t *testing.T) {
@@ -33,13 +40,24 @@ func TestImportPreClusteringData(t *testing.T) {
 	err = cluster.ImportPreClusteringData(dump)
 	require.NoError(t, err)
 
-	cluster.Transaction(func(tx *db.ClusterTx) error {
+	// config
+	err = cluster.Transaction(func(tx *db.ClusterTx) error {
 		config, err := tx.Config()
 		require.NoError(t, err)
 		values := map[string]string{"core.https_address": "1.2.3.4:666"}
 		assert.Equal(t, values, config)
 		return nil
 	})
+	require.NoError(t, err)
+
+	// networks
+	networks, err := cluster.Networks()
+	require.NoError(t, err)
+	assert.Equal(t, []string{"lxcbr0"}, networks)
+	id, network, err := cluster.NetworkGet("lxcbr0")
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+	assert.Equal(t, "true", network.Config["ipv4.nat"])
 }
 
 // Return a sql.Tx against a memory database populated with pre-clustering
@@ -54,6 +72,8 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 	stmts := []string{
 		preClusteringNodeSchema,
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
+		"INSERT INTO networks VALUES(1, 'lxcbr0', 'LXD bridge')",
+		"INSERT INTO networks_config VALUES(1, 1, 'ipv4.nat', 'true')",
 	}
 	for _, stmt := range stmts {
 		_, err := tx.Exec(stmt)
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 66b3b5913..16406c2f9 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -10,12 +10,12 @@ import (
 	"github.com/lxc/lxd/shared/api"
 )
 
-func (n *Node) Networks() ([]string, error) {
+func (c *Cluster) Networks() ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM networks")
 	inargs := []interface{}{}
 	var name string
 	outfmt := []interface{}{name}
-	result, err := queryScan(n.db, q, inargs, outfmt)
+	result, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return []string{}, err
 	}
@@ -28,19 +28,19 @@ func (n *Node) Networks() ([]string, error) {
 	return response, nil
 }
 
-func (n *Node) NetworkGet(name string) (int64, *api.Network, error) {
+func (c *Cluster) NetworkGet(name string) (int64, *api.Network, error) {
 	description := sql.NullString{}
 	id := int64(-1)
 
 	q := "SELECT id, description FROM networks WHERE name=?"
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&id, &description}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	config, err := n.NetworkConfigGet(id)
+	config, err := c.NetworkConfigGet(id)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -56,7 +56,7 @@ func (n *Node) NetworkGet(name string) (int64, *api.Network, error) {
 	return id, &network, nil
 }
 
-func (n *Node) NetworkGetInterface(devName string) (int64, *api.Network, error) {
+func (c *Cluster) NetworkGetInterface(devName string) (int64, *api.Network, error) {
 	id := int64(-1)
 	name := ""
 	value := ""
@@ -64,7 +64,7 @@ func (n *Node) NetworkGetInterface(devName string) (int64, *api.Network, error)
 	q := "SELECT networks.id, networks.name, networks_config.value FROM networks LEFT JOIN networks_config ON networks.id=networks_config.network_id WHERE networks_config.key=\"bridge.external_interfaces\""
 	arg1 := []interface{}{}
 	arg2 := []interface{}{id, name, value}
-	result, err := queryScan(n.db, q, arg1, arg2)
+	result, err := queryScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -84,7 +84,7 @@ func (n *Node) NetworkGetInterface(devName string) (int64, *api.Network, error)
 		return -1, nil, fmt.Errorf("No network found for interface: %s", devName)
 	}
 
-	config, err := n.NetworkConfigGet(id)
+	config, err := c.NetworkConfigGet(id)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -99,7 +99,7 @@ func (n *Node) NetworkGetInterface(devName string) (int64, *api.Network, error)
 	return id, &network, nil
 }
 
-func (n *Node) NetworkConfigGet(id int64) (map[string]string, error) {
+func (c *Cluster) NetworkConfigGet(id int64) (map[string]string, error) {
 	var key, value string
 	query := `
         SELECT
@@ -108,7 +108,7 @@ func (n *Node) NetworkConfigGet(id int64) (map[string]string, error) {
 		WHERE network_id=?`
 	inargs := []interface{}{id}
 	outfmt := []interface{}{key, value}
-	results, err := queryScan(n.db, query, inargs, outfmt)
+	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to get network '%d'", id)
 	}
@@ -120,7 +120,7 @@ func (n *Node) NetworkConfigGet(id int64) (map[string]string, error) {
 		 */
 		query := "SELECT id FROM networks WHERE id=?"
 		var r int
-		results, err := queryScan(n.db, query, []interface{}{id}, []interface{}{r})
+		results, err := queryScan(c.db, query, []interface{}{id}, []interface{}{r})
 		if err != nil {
 			return nil, err
 		}
@@ -142,8 +142,8 @@ func (n *Node) NetworkConfigGet(id int64) (map[string]string, error) {
 	return config, nil
 }
 
-func (n *Node) NetworkCreate(name, description string, config map[string]string) (int64, error) {
-	tx, err := begin(n.db)
+func (c *Cluster) NetworkCreate(name, description string, config map[string]string) (int64, error) {
+	tx, err := begin(c.db)
 	if err != nil {
 		return -1, err
 	}
@@ -174,13 +174,13 @@ func (n *Node) NetworkCreate(name, description string, config map[string]string)
 	return id, nil
 }
 
-func (n *Node) NetworkUpdate(name, description string, config map[string]string) error {
-	id, _, err := n.NetworkGet(name)
+func (c *Cluster) NetworkUpdate(name, description string, config map[string]string) error {
+	id, _, err := c.NetworkGet(name)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -242,13 +242,13 @@ func NetworkConfigClear(tx *sql.Tx, id int64) error {
 	return nil
 }
 
-func (n *Node) NetworkDelete(name string) error {
-	id, _, err := n.NetworkGet(name)
+func (c *Cluster) NetworkDelete(name string) error {
+	id, _, err := c.NetworkGet(name)
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM networks WHERE id=?", id)
+	_, err = exec(c.db, "DELETE FROM networks WHERE id=?", id)
 	if err != nil {
 		return err
 	}
@@ -256,13 +256,13 @@ func (n *Node) NetworkDelete(name string) error {
 	return nil
 }
 
-func (n *Node) NetworkRename(oldName string, newName string) error {
-	id, _, err := n.NetworkGet(oldName)
+func (c *Cluster) NetworkRename(oldName string, newName string) error {
+	id, _, err := c.NetworkGet(oldName)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index a9754eeaa..7a0511f92 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -105,20 +105,6 @@ CREATE TABLE images_source (
     alias VARCHAR(255) NOT NULL,
     FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
 );
-CREATE TABLE networks (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    description TEXT,
-    UNIQUE (name)
-);
-CREATE TABLE networks_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    network_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    UNIQUE (network_id, key),
-    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
-);
 CREATE TABLE patches (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name VARCHAR(255) NOT NULL,
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index ce1dd6b85..0866bd63d 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -117,6 +117,9 @@ CREATE TABLE raft_nodes (
     address TEXT NOT NULL,
     UNIQUE (address)
 );
+DELETE FROM config WHERE NOT key='core.https_address';
+DROP TABLE networks_config;
+DROP TABLE networks;
 `
 	_, err := tx.Exec(stmts)
 	return err
diff --git a/lxd/db/node/update_test.go b/lxd/db/node/update_test.go
index 980ef8bf3..36d3d2136 100644
--- a/lxd/db/node/update_test.go
+++ b/lxd/db/node/update_test.go
@@ -1,13 +1,17 @@
 package node_test
 
 import (
+	"database/sql"
 	"testing"
 
 	"github.com/lxc/lxd/lxd/db/node"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-func TestUpdateFromV36(t *testing.T) {
+func TestUpdateFromV36_RaftNodes(t *testing.T) {
 	schema := node.Schema()
 	db, err := schema.ExerciseUpdate(37, nil)
 	require.NoError(t, err)
@@ -15,3 +19,27 @@ func TestUpdateFromV36(t *testing.T) {
 	_, err = db.Exec("INSERT INTO raft_nodes VALUES (1, '1.2.3.4:666')")
 	require.NoError(t, err)
 }
+
+// All model tables previously in the node database have been migrated to the
+// cluster database, and dropped from the node database.
+func TestUpdateFromV36_DropTables(t *testing.T) {
+	schema := node.Schema()
+	db, err := schema.ExerciseUpdate(37, nil)
+	require.NoError(t, err)
+
+	var current []string
+	query.Transaction(db, func(tx *sql.Tx) error {
+		var err error
+		stmt := "SELECT name FROM sqlite_master WHERE type='table'"
+		current, err = query.SelectStrings(tx, stmt)
+		return err
+	})
+	require.NoError(t, err)
+	deleted := []string{
+		"networks",
+		"networks_config",
+	}
+	for _, name := range deleted {
+		assert.False(t, shared.StringInSlice(name, current))
+	}
+}
diff --git a/lxd/devices.go b/lxd/devices.go
index 917b69422..99d0a7e7a 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -847,7 +847,7 @@ func deviceEventListener(s *state.State) {
 
 			logger.Debugf("Scheduler: network: %s has been added: updating network priorities", e[0])
 			deviceNetworkPriority(s, e[0])
-			networkAutoAttach(s.Node, e[0])
+			networkAutoAttach(s.Cluster, e[0])
 		case e := <-chUSB:
 			deviceUSBEvent(s, e)
 		case e := <-deviceSchedRebalance:
diff --git a/lxd/networks.go b/lxd/networks.go
index 76234965d..e12bc5619 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -32,7 +32,7 @@ func networksGet(d *Daemon, r *http.Request) Response {
 		recursion = 0
 	}
 
-	ifs, err := networkGetInterfaces(d.db)
+	ifs, err := networkGetInterfaces(d.cluster)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -81,7 +81,7 @@ func networksPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Only 'bridge' type networks can be created"))
 	}
 
-	networks, err := networkGetInterfaces(d.db)
+	networks, err := networkGetInterfaces(d.cluster)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -130,7 +130,7 @@ func networksPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Create the database entry
-	_, err = d.db.NetworkCreate(req.Name, req.Description, req.Config)
+	_, err = d.cluster.NetworkCreate(req.Name, req.Description, req.Config)
 	if err != nil {
 		return InternalError(
 			fmt.Errorf("Error inserting %s into database: %s", req.Name, err))
@@ -169,7 +169,7 @@ func networkGet(d *Daemon, r *http.Request) Response {
 func doNetworkGet(d *Daemon, name string) (api.Network, error) {
 	// Get some information
 	osInfo, _ := net.InterfaceByName(name)
-	_, dbInfo, _ := d.db.NetworkGet(name)
+	_, dbInfo, _ := d.cluster.NetworkGet(name)
 
 	// Sanity check
 	if osInfo == nil && dbInfo == nil {
@@ -280,7 +280,7 @@ func networkPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check that the name isn't already in use
-	networks, err := networkGetInterfaces(d.db)
+	networks, err := networkGetInterfaces(d.cluster)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -302,7 +302,7 @@ func networkPut(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 
 	// Get the existing network
-	_, dbInfo, err := d.db.NetworkGet(name)
+	_, dbInfo, err := d.cluster.NetworkGet(name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -327,7 +327,7 @@ func networkPatch(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 
 	// Get the existing network
-	_, dbInfo, err := d.db.NetworkGet(name)
+	_, dbInfo, err := d.cluster.NetworkGet(name)
 	if dbInfo != nil {
 		return SmartError(err)
 	}
@@ -392,7 +392,7 @@ var networkCmd = Command{name: "networks/{name}", get: networkGet, delete: netwo
 
 // The network structs and functions
 func networkLoadByName(s *state.State, name string) (*network, error) {
-	id, dbInfo, err := s.Node.NetworkGet(name)
+	id, dbInfo, err := s.Cluster.NetworkGet(name)
 	if err != nil {
 		return nil, err
 	}
@@ -404,7 +404,7 @@ func networkLoadByName(s *state.State, name string) (*network, error) {
 
 func networkStartup(s *state.State) error {
 	// Get a list of managed networks
-	networks, err := s.Node.Networks()
+	networks, err := s.Cluster.Networks()
 	if err != nil {
 		return err
 	}
@@ -428,7 +428,7 @@ func networkStartup(s *state.State) error {
 
 func networkShutdown(s *state.State) error {
 	// Get a list of managed networks
-	networks, err := s.Node.Networks()
+	networks, err := s.Cluster.Networks()
 	if err != nil {
 		return err
 	}
@@ -509,7 +509,7 @@ func (n *network) Delete() error {
 	}
 
 	// Remove the network from the database
-	err := n.db.NetworkDelete(n.name)
+	err := n.state.Cluster.NetworkDelete(n.name)
 	if err != nil {
 		return err
 	}
@@ -544,7 +544,7 @@ func (n *network) Rename(name string) error {
 	}
 
 	// Rename the database entry
-	err := n.db.NetworkRename(n.name, name)
+	err := n.state.Cluster.NetworkRename(n.name, name)
 	if err != nil {
 		return err
 	}
@@ -1425,7 +1425,7 @@ func (n *network) Update(newNetwork api.NetworkPut) error {
 	n.description = newNetwork.Description
 
 	// Update the database
-	err = n.db.NetworkUpdate(n.name, n.description, n.config)
+	err = n.state.Cluster.NetworkUpdate(n.name, n.description, n.config)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index d10b4b00e..1de47c57a 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -29,8 +29,8 @@ import (
 
 var networkStaticLock sync.Mutex
 
-func networkAutoAttach(db *db.Node, devName string) error {
-	_, dbInfo, err := db.NetworkGetInterface(devName)
+func networkAutoAttach(cluster *db.Cluster, devName string) error {
+	_, dbInfo, err := cluster.NetworkGetInterface(devName)
 	if err != nil {
 		// No match found, move on
 		return nil
@@ -77,8 +77,8 @@ func networkDetachInterface(netName string, devName string) error {
 	return nil
 }
 
-func networkGetInterfaces(db *db.Node) ([]string, error) {
-	networks, err := db.Networks()
+func networkGetInterfaces(cluster *db.Cluster) ([]string, error) {
+	networks, err := cluster.Networks()
 	if err != nil {
 		return nil, err
 	}
@@ -753,7 +753,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 	var networks []string
 	if networkName == "" {
 		var err error
-		networks, err = s.Node.Networks()
+		networks, err = s.Cluster.Networks()
 		if err != nil {
 			return err
 		}
diff --git a/lxd/patches.go b/lxd/patches.go
index e389567c9..8d3fcf2f6 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -131,7 +131,7 @@ func patchInvalidProfileNames(name string, d *Daemon) error {
 
 func patchNetworkPermissions(name string, d *Daemon) error {
 	// Get the list of networks
-	networks, err := d.db.Networks()
+	networks, err := d.cluster.Networks()
 	if err != nil {
 		return err
 	}
@@ -2393,7 +2393,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
 
 func patchNetworkDnsmasqHosts(name string, d *Daemon) error {
 	// Get the list of networks
-	networks, err := d.db.Networks()
+	networks, err := d.cluster.Networks()
 	if err != nil {
 		return err
 	}
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index bf0eb230e..f02cb36d7 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -187,8 +187,6 @@ kill_lxd() {
         check_empty_table "${daemon_dir}/lxd.db" "containers_devices"
         check_empty_table "${daemon_dir}/lxd.db" "containers_devices_config"
         check_empty_table "${daemon_dir}/lxd.db" "containers_profiles"
-        check_empty_table "${daemon_dir}/lxd.db" "networks"
-        check_empty_table "${daemon_dir}/lxd.db" "networks_config"
         check_empty_table "${daemon_dir}/lxd.db" "images"
         check_empty_table "${daemon_dir}/lxd.db" "images_aliases"
         check_empty_table "${daemon_dir}/lxd.db" "images_properties"
@@ -201,6 +199,12 @@ kill_lxd() {
         check_empty_table "${daemon_dir}/lxd.db" "storage_pools_config"
         check_empty_table "${daemon_dir}/lxd.db" "storage_volumes"
         check_empty_table "${daemon_dir}/lxd.db" "storage_volumes_config"
+
+        echo "==> Checking for leftover cluster DB entries"
+        # FIXME: we should not use the command line sqlite client, since it's
+        #        not compatible with dqlite
+        check_empty_table "${daemon_dir}/raft/db.bin" "networks"
+        check_empty_table "${daemon_dir}/raft/db.bin" "networks_config"
     fi
 
     # teardown storage
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index 15189bd2f..4af380d32 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,12 +9,12 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=24
+  expected_tables=22
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 
   # There should be 15 "ON DELETE CASCADE" occurrences
-  expected_cascades=15
+  expected_cascades=14
   cascades=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "ON DELETE CASCADE")
   [ "${cascades}" -eq "${expected_cascades}" ] || { echo "FAIL: Wrong number of ON DELETE CASCADE foreign keys. Found: ${cascades}, exected: ${expected_cascades}"; false; }
 

From 7ed649ec5801a4697b0816b5fd3135b19a428090 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 24 Oct 2017 11:06:28 +0000
Subject: [PATCH 052/116] Configure networks when joining an existing cluster
 node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go  |  1 +
 client/lxd_cluster.go | 24 +++++++++++++++++++-
 lxd/api_cluster.go    | 34 ++++++++++++++++++++++++++-
 lxd/main_init.go      | 63 ++++++++++++++++++++++++++++++++++++++++++++-------
 lxd/main_init_test.go | 21 +++++++++++++++++
 shared/api/cluster.go |  9 ++++++++
 6 files changed, 142 insertions(+), 10 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index a0856fe9d..eaca03a79 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -161,6 +161,7 @@ type ContainerServer interface {
 	RenameStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePost) (err error)
 
 	// Cluster functions ("cluster" API extensions)
+	GetCluster(password string) (cluster *api.Cluster, err error)
 	BootstrapCluster(name string) (op *Operation, err error)
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (op *Operation, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 4c8de7bad..7d153cbb5 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -1,6 +1,28 @@
 package lxd
 
-import "github.com/lxc/lxd/shared/api"
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/shared/api"
+)
+
+// GetCluster returns information about a cluster.
+//
+// If this client is not trusted, the password must be supplied.
+func (r *ProtocolLXD) GetCluster(password string) (*api.Cluster, error) {
+	cluster := &api.Cluster{}
+	path := "/cluster"
+	if password != "" {
+		path += fmt.Sprintf("?password=%s", password)
+	}
+	_, err := r.queryStruct("GET", path, nil, "", &cluster)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return cluster, nil
+}
 
 // BootstrapCluster requests to bootstrap a new cluster.
 func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index c00d763c5..28b3c46f7 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -15,7 +15,39 @@ import (
 	"github.com/pkg/errors"
 )
 
-var clusterCmd = Command{name: "cluster", untrustedPost: true, post: clusterPost}
+var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet, untrustedPost: true, post: clusterPost}
+
+func clusterGet(d *Daemon, r *http.Request) Response {
+	// If the client is not trusted, check that it's presenting the trust
+	// password.
+	trusted := d.checkTrustedClient(r) == nil
+	if !trusted {
+		secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
+		if err != nil {
+			return SmartError(err)
+		}
+		if util.PasswordCheck(secret, r.FormValue("password")) != nil {
+			return Forbidden
+		}
+	}
+
+	cluster := api.Cluster{}
+
+	// Fill the Networks attribute
+	networks, err := d.cluster.Networks()
+	if err != nil {
+		return SmartError(err)
+	}
+	for _, name := range networks {
+		_, network, err := d.cluster.NetworkGet(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		cluster.Networks = append(cluster.Networks, *network)
+	}
+
+	return SyncResponse(true, cluster)
+}
 
 func clusterPost(d *Daemon, r *http.Request) Response {
 	req := api.ClusterPost{}
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 131b17810..2116c02e6 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -6,6 +6,7 @@ import (
 	"net"
 	"os"
 	"os/exec"
+	"sort"
 	"strconv"
 	"strings"
 	"syscall"
@@ -168,6 +169,25 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 			Port:          clustering.Port,
 			TrustPassword: clustering.TrustPassword,
 		}
+		if clustering.TargetAddress != "" {
+			// Client parameters to connect to the target cluster node.
+			args := &lxd.ConnectionArgs{
+				TLSServerCert: string(clustering.TargetCert),
+			}
+			url := fmt.Sprintf("https://%s", clustering.TargetAddress)
+			client, err := lxd.ConnectLXD(url, args)
+			if err != nil {
+				return err
+			}
+			cluster, err := client.GetCluster(clustering.TargetPassword)
+			if err != nil {
+				return err
+			}
+			data.Networks, err = cmd.askClusteringNetworks(cluster)
+			if err != nil {
+				return err
+			}
+		}
 	}
 
 	_, err = exec.LookPath("dnsmasq")
@@ -426,13 +446,6 @@ func (cmd *CmdInit) apply(client lxd.ContainerServer, data *cmdInitData) error {
 		return cmd.initConfig(client, data.Config)
 	})
 
-	// Cluster changers
-	if data.Cluster.Name != "" {
-		changers = append(changers, func() (reverter, error) {
-			return cmd.initCluster(client, data.Cluster)
-		})
-	}
-
 	// Storage pool changers
 	for i := range data.Pools {
 		pool := data.Pools[i] // Local variable for the closure
@@ -457,6 +470,13 @@ func (cmd *CmdInit) apply(client lxd.ContainerServer, data *cmdInitData) error {
 		})
 	}
 
+	// Cluster changers
+	if data.Cluster.Name != "" {
+		changers = append(changers, func() (reverter, error) {
+			return cmd.initCluster(client, data.Cluster)
+		})
+	}
+
 	// Apply all changes. If anything goes wrong at any iteration
 	// of the loop, we'll try to revert any change performed in
 	// earlier iterations.
@@ -800,7 +820,7 @@ join:
 		goto join
 	}
 	digest := shared.CertFingerprint(certificate)
-	askFingerprint := fmt.Sprintf("Remote node fingerprint: %s ok (y/n)? ", digest)
+	askFingerprint := fmt.Sprintf("Remote node fingerprint: %s ok (yes/no)? ", digest)
 	if !cmd.Context.AskBool(askFingerprint, "") {
 		return nil, fmt.Errorf("Cluster certificate NACKed by user")
 	}
@@ -816,6 +836,33 @@ join:
 	return params, nil
 }
 
+func (cmd *CmdInit) askClusteringNetworks(cluster *api.Cluster) ([]api.NetworksPost, error) {
+	networks := make([]api.NetworksPost, len(cluster.Networks))
+	for i, network := range cluster.Networks {
+		if !network.Managed {
+			continue
+		}
+		post := api.NetworksPost{}
+		post.Name = network.Name
+		post.Config = network.Config
+		post.Type = network.Type
+		post.Managed = true
+		// Sort config keys to get a stable ordering (especially for tests)
+		keys := []string{}
+		for key := range post.Config {
+			keys = append(keys, key)
+		}
+		sort.Strings(keys)
+		for _, key := range keys {
+			question := fmt.Sprintf(
+				`Enter local value for key "%s" of network "%s": `, key, post.Name)
+			post.Config[key] = cmd.Context.AskString(question, "", nil)
+		}
+		networks[i] = post
+	}
+	return networks, nil
+}
+
 // Ask if the user wants to create a new storage pool, and return
 // the relevant parameters if so.
 func (cmd *CmdInit) askStorage(client lxd.ContainerServer, existingPools []string, availableBackends []string) (*cmdInitStorageParams, error) {
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 13af4cf48..6eef12c19 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -161,6 +161,17 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 	f := clusterFixture{t: suite.T()}
 	f.FormCluster([]*Daemon{leader})
 
+	network := api.NetworksPost{
+		Name:    "mybr",
+		Type:    "bridge",
+		Managed: true,
+	}
+	network.Config = map[string]string{
+		"ipv4.nat": "true",
+	}
+	client := f.ClientUnix(leader)
+	client.CreateNetwork(network)
+
 	suite.command.PasswordReader = func(int) ([]byte, error) {
 		return []byte("sekret"), nil
 	}
@@ -174,6 +185,12 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 		ClusterTargetNodeAddress: leader.endpoints.NetworkAddress(),
 		ClusterAcceptFingerprint: true,
 		ClusterConfirmLosingData: true,
+		ClusterConfig: []string{
+			"10.23.189.2/24", // ipv4.address
+			"true",           // ipv4.nat
+			"aaaa:bbbb:cccc:dddd::1/64", // ipv6.address
+			"true", // ipv6.nat
+		},
 	}
 	answers.Render(suite.streams)
 
@@ -752,6 +769,7 @@ type cmdInitAnswers struct {
 	ClusterTargetNodeAddress string
 	ClusterAcceptFingerprint bool
 	ClusterConfirmLosingData bool
+	ClusterConfig            []string
 	WantStoragePool          bool
 	WantAvailableOverNetwork bool
 	BindToAddress            string
@@ -775,6 +793,9 @@ func (answers *cmdInitAnswers) Render(streams *cmd.MemoryStreams) {
 			streams.InputAppendLine(answers.ClusterTargetNodeAddress)
 			streams.InputAppendBoolAnswer(answers.ClusterAcceptFingerprint)
 			streams.InputAppendBoolAnswer(answers.ClusterConfirmLosingData)
+			for _, value := range answers.ClusterConfig {
+				streams.InputAppendLine(value)
+			}
 		}
 	}
 	streams.InputAppendBoolAnswer(answers.WantStoragePool)
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 4f54d2ada..5000394c5 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -1,5 +1,10 @@
 package api
 
+// Cluster represents high-level information about a LXD cluster.
+type Cluster struct {
+	Networks []Network
+}
+
 // ClusterPost represents the fields required to bootstrap or join a LXD
 // cluster.
 //
@@ -16,12 +21,16 @@ type ClusterPost struct {
 }
 
 // ClusterNodeAccepted represents the response of a request to join a cluster.
+//
+// API extension: cluster
 type ClusterNodeAccepted struct {
 	RaftNodes  []RaftNode `json:"raft_nodes" yaml:"raft_nodes"`
 	PrivateKey []byte     `json:"private_key" yaml:"private_key"`
 }
 
 // RaftNode represents the a LXD node that is part of the dqlite raft cluster.
+//
+// API extension: cluster
 type RaftNode struct {
 	ID      int64  `json:"id" yaml:"id"`
 	Address string `json:"address" yaml:"address"`

From 14304aed91984873df399a678666afca358a3453 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 24 Oct 2017 13:20:27 +0000
Subject: [PATCH 053/116] Add a node_id column to the networks_config table

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_1.0.go                |  8 ++++++--
 lxd/cluster/membership.go     | 10 +++++-----
 lxd/cluster/notify.go         |  2 +-
 lxd/cluster/notify_test.go    | 25 ++++++++++++++++++++-----
 lxd/db/cluster/open.go        | 28 +++++++++++++++++++++++-----
 lxd/db/cluster/open_test.go   |  1 +
 lxd/db/cluster/query.go       |  7 ++++---
 lxd/db/cluster/schema.go      |  6 ++++--
 lxd/db/cluster/update.go      |  6 ++++--
 lxd/db/cluster/update_test.go | 36 ++++++++++++++++++++++++++++++------
 lxd/db/db.go                  | 24 ++++++++++++++++++++++++
 lxd/db/migration.go           | 11 ++++++++---
 lxd/db/networks.go            | 27 +++++++++++++++------------
 lxd/db/node.go                | 16 ++++++++++++++++
 lxd/db/node_test.go           | 10 +++++-----
 15 files changed, 166 insertions(+), 51 deletions(-)

diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 36b118c3a..27f123712 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -324,9 +324,13 @@ func doApi10UpdateTriggers(d *Daemon, changed map[string]string) error {
 				return err
 			}
 		case "images.auto_update_interval":
-			d.taskAutoUpdate.Reset()
+			if !d.os.MockMode {
+				d.taskAutoUpdate.Reset()
+			}
 		case "images.remote_cache_expiry":
-			d.taskPruneImages.Reset()
+			if !d.os.MockMode {
+				d.taskPruneImages.Reset()
+			}
 		}
 	}
 	return nil
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index e923f0e05..d635ad9c0 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -61,7 +61,7 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 		return err
 	}
 
-	// Insert ourselves into the nodes table.
+	// Update our own entry in the nodes table.
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		// Make sure cluster database state is in order.
 		err := membershipCheckClusterStateForBootstrapOrJoin(tx)
@@ -70,9 +70,9 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 		}
 
 		// Add ourselves to the nodes table.
-		_, err = tx.NodeAdd(name, address)
+		err = tx.NodeUpdate(1, name, address)
 		if err != nil {
-			return errors.Wrap(err, "failed to insert cluster node")
+			return errors.Wrap(err, "failed to update cluster node")
 		}
 
 		return nil
@@ -320,7 +320,7 @@ func membershipCheckClusterStateForBootstrapOrJoin(tx *db.ClusterTx) error {
 	if err != nil {
 		return errors.Wrap(err, "failed to fetch current cluster nodes")
 	}
-	if len(nodes) > 0 {
+	if len(nodes) != 1 {
 		return fmt.Errorf("inconsistent state: found leftover entries in nodes")
 	}
 	return nil
@@ -332,7 +332,7 @@ func membershipCheckClusterStateForAccept(tx *db.ClusterTx, name string, address
 	if err != nil {
 		return errors.Wrap(err, "failed to fetch current cluster nodes")
 	}
-	if len(nodes) == 0 {
+	if len(nodes) == 1 && nodes[0].Address == "0.0.0.0" {
 		return fmt.Errorf("clustering not enabled")
 	}
 
diff --git a/lxd/cluster/notify.go b/lxd/cluster/notify.go
index 860692111..cb5a69a76 100644
--- a/lxd/cluster/notify.go
+++ b/lxd/cluster/notify.go
@@ -49,7 +49,7 @@ func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolic
 			return err
 		}
 		for _, node := range nodes {
-			if node.Address == address {
+			if node.Address == address || node.Address == "0.0.0.0" {
 				continue // Exclude ourselves
 			}
 			if node.IsDown() {
diff --git a/lxd/cluster/notify_test.go b/lxd/cluster/notify_test.go
index 409d04d8b..1dd2fea9a 100644
--- a/lxd/cluster/notify_test.go
+++ b/lxd/cluster/notify_test.go
@@ -32,16 +32,26 @@ func TestNewNotifier(t *testing.T) {
 	notifier, err := cluster.NewNotifier(state, cert, cluster.NotifyAll)
 	require.NoError(t, err)
 
-	i := 0
+	peers := make(chan string, 2)
 	hook := func(client lxd.ContainerServer) error {
 		server, _, err := client.GetServer()
 		require.NoError(t, err)
-		assert.Equal(t, f.Address(i+1), server.Config["core.https_address"])
-		i++
+		peers <- server.Config["core.https_address"].(string)
 		return nil
 	}
 	assert.NoError(t, notifier(hook))
-	assert.Equal(t, 2, i)
+
+	addresses := make([]string, 2)
+	for i := range addresses {
+		select {
+		case addresses[i] = <-peers:
+		default:
+		}
+	}
+	require.NoError(t, err)
+	for i := range addresses {
+		assert.True(t, shared.StringInSlice(f.Address(i+1), addresses))
+	}
 }
 
 // Creating a new notifier fails if the policy is set to NotifyAll and one of
@@ -108,7 +118,12 @@ func (h *notifyFixtures) Nodes(cert *shared.CertInfo, n int) func() {
 		for i := 0; i < n; i++ {
 			name := strconv.Itoa(i)
 			address := servers[i].Listener.Addr().String()
-			_, err := tx.NodeAdd(name, address)
+			var err error
+			if i == 0 {
+				err = tx.NodeUpdate(int64(1), name, address)
+			} else {
+				_, err = tx.NodeAdd(name, address)
+			}
 			require.NoError(h.t, err)
 		}
 		return nil
diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index 03fe5ece9..bcbb3a727 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -57,12 +57,16 @@ func EnsureSchema(db *sql.DB, address string) (bool, error) {
 		}
 
 		// Check if we're clustered
-		n, err := selectNodesCount(tx)
+		n, err := selectUnclusteredNodesCount(tx)
 		if err != nil {
-			return errors.Wrap(err, "failed to fetch current nodes count")
+			return errors.Wrap(err, "failed to fetch unclustered nodes count")
 		}
-		if n == 0 {
-			return nil // Nothing to do.
+		if n > 1 {
+			// This should never happen, since we only add nodes
+			// with valid addresses, but check it for sanity.
+			return fmt.Errorf("found more than one unclustered nodes")
+		} else if n == 1 {
+			address = "0.0.0.0" // We're not clustered
 		}
 
 		// Update the schema and api_extension columns of ourselves.
@@ -83,13 +87,27 @@ func EnsureSchema(db *sql.DB, address string) (bool, error) {
 	schema := Schema()
 	schema.Check(check)
 
-	_, err := schema.Ensure(db)
+	initial, err := schema.Ensure(db)
 	if someNodesAreBehind {
 		return false, nil
 	}
 	if err != nil {
 		return false, err
 	}
+
+	// When creating a database from scratch, insert an entry for node
+	// 1. This is needed for referential integrity with other tables.
+	if initial == 0 {
+		stmt := `
+INSERT INTO nodes(id, name, address, schema, api_extensions) VALUES(1, 'none', '0.0.0.0', ?, ?)
+`
+		_, err := db.Exec(stmt, SchemaVersion, apiExtensions)
+		if err != nil {
+			return false, err
+		}
+
+	}
+
 	return true, err
 }
 
diff --git a/lxd/db/cluster/open_test.go b/lxd/db/cluster/open_test.go
index f858d7b35..5a83789ca 100644
--- a/lxd/db/cluster/open_test.go
+++ b/lxd/db/cluster/open_test.go
@@ -16,6 +16,7 @@ import (
 // If the node is not clustered, the schema updates works normally.
 func TestEnsureSchema_NoClustered(t *testing.T) {
 	db := newDB(t)
+	addNode(t, db, "0.0.0.0", 1, 1)
 	ready, err := cluster.EnsureSchema(db, "1.2.3.4:666")
 	assert.True(t, ready)
 	assert.NoError(t, err)
diff --git a/lxd/db/cluster/query.go b/lxd/db/cluster/query.go
index 286ffe2db..dda6b63c5 100644
--- a/lxd/db/cluster/query.go
+++ b/lxd/db/cluster/query.go
@@ -27,9 +27,10 @@ func updateNodeVersion(tx *sql.Tx, address string, apiExtensions int) error {
 	return nil
 }
 
-// Return the number of rows in the nodes table.
-func selectNodesCount(tx *sql.Tx) (int, error) {
-	return query.Count(tx, "nodes", "")
+// Return the number of rows in the nodes table that have their address column
+// set to '0.0.0.0'.
+func selectUnclusteredNodesCount(tx *sql.Tx) (int, error) {
+	return query.Count(tx, "nodes", "address='0.0.0.0'")
 }
 
 // Return a slice of binary integer tuples. Each tuple contains the schema
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 3354e36b2..c6f3f647f 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -21,10 +21,12 @@ CREATE TABLE networks (
 CREATE TABLE networks_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     network_id INTEGER NOT NULL,
+    node_id INTEGER,
     key VARCHAR(255) NOT NULL,
     value TEXT,
-    UNIQUE (network_id, key),
-    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+    UNIQUE (network_id, node_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 CREATE TABLE nodes (
     id INTEGER PRIMARY KEY,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 967ac40f4..4bf1df4ae 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -45,10 +45,12 @@ CREATE TABLE networks (
 CREATE TABLE networks_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     network_id INTEGER NOT NULL,
+    node_id INTEGER,
     key VARCHAR(255) NOT NULL,
     value TEXT,
-    UNIQUE (network_id, key),
-    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE
+    UNIQUE (network_id, node_id, key),
+    FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 `
 	_, err := tx.Exec(stmt)
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index f56f63e11..a0bfa9eb6 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -60,26 +60,50 @@ func TestUpdateFromV1_NetworkConfig(t *testing.T) {
 	db, err := schema.ExerciseUpdate(2, nil)
 	require.NoError(t, err)
 
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'one', '', '1.1.1.1', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'two', '', '2.2.2.2', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
 	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah')")
 	require.NoError(t, err)
 
-	_, err = db.Exec("INSERT INTO networks_config VALUES (1, 1, 'bar', 'baz')")
+	_, err = db.Exec("INSERT INTO networks_config VALUES (1, 1, 1, 'bar', 'baz')")
 	require.NoError(t, err)
 
-	// Unique constraint on network_id/key.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (2, 1, 'bar', 'egg')")
+	// Unique constraint on network_id/node_id/key.
+	_, err = db.Exec("INSERT INTO networks_config VALUES (2, 1, 1, 'bar', 'egg')")
 	require.Error(t, err)
+	_, err = db.Exec("INSERT INTO networks_config VALUES (3, 1, 2, 'bar', 'egg')")
+	require.NoError(t, err)
 
 	// Reference constraint on network_id.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (3, , 'fuz', 'buz')")
+	_, err = db.Exec("INSERT INTO networks_config VALUES (4, 2, 1, 'fuz', 'buz')")
+	require.Error(t, err)
+
+	// Reference constraint on node_id.
+	_, err = db.Exec("INSERT INTO networks_config VALUES (5, 1, 3, 'fuz', 'buz')")
 	require.Error(t, err)
 
-	// Cascade deletes
-	result, err := db.Exec("DELETE FROM networks")
+	// Cascade deletes on node_id
+	result, err := db.Exec("DELETE FROM nodes WHERE id=2")
 	require.NoError(t, err)
 	n, err := result.RowsAffected()
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), n)
+	result, err = db.Exec("UPDATE networks_config SET value='yuk'")
+	require.NoError(t, err)
+	n, err = result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), n) // Only one row was affected, since the other got deleted
+
+	// Cascade deletes on network_id
+	result, err = db.Exec("DELETE FROM networks")
+	require.NoError(t, err)
+	n, err = result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), n)
 	result, err = db.Exec("DELETE FROM networks_config")
 	require.NoError(t, err)
 	n, err = result.RowsAffected()
diff --git a/lxd/db/db.go b/lxd/db/db.go
index e21c429aa..985e7b2f2 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -145,6 +145,7 @@ func (n *Node) Begin() (*sql.Tx, error) {
 // Cluster mediates access to LXD's data stored in the cluster dqlite database.
 type Cluster struct {
 	db *sql.DB // Handle to the cluster dqlite database, gated behind gRPC SQL.
+	id int64   // Node ID of this LXD instance.
 }
 
 // OpenCluster creates a new Cluster object for interacting with the dqlite
@@ -174,6 +175,29 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 		db: db,
 	}
 
+	// Figure out the ID of this node.
+	err = cluster.Transaction(func(tx *ClusterTx) error {
+		nodes, err := tx.Nodes()
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch nodes")
+		}
+		if len(nodes) == 1 && nodes[0].Address == "0.0.0.0" {
+			// We're not clustered
+			cluster.id = 1
+			return nil
+		}
+		for _, node := range nodes {
+			if node.Address == address {
+				cluster.id = node.ID
+				return nil
+			}
+		}
+		return fmt.Errorf("no node registered with address %s", address)
+	})
+	if err != nil {
+		return nil, err
+	}
+
 	return cluster, nil
 }
 
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 737d3b11b..084c14b85 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -66,9 +66,6 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 	}
 
 	for _, table := range preClusteringTables {
-		columns := dump.Schema[table]
-		stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
-		stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
 		for i, row := range dump.Data[table] {
 			for i, element := range row {
 				// Convert []byte columns to string. This is safe to do since
@@ -78,6 +75,14 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 					row[i] = string(bytes)
 				}
 			}
+			columns := dump.Schema[table]
+			switch table {
+			case "networks_config":
+				columns = append(columns, "node_id")
+				row = append(row, int64(1))
+			}
+			stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
+			stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
 			result, err := tx.Exec(stmt, row...)
 			if err != nil {
 				return errors.Wrapf(err, "failed to insert row %d into %s", i, table)
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 16406c2f9..a2cae8a15 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -61,8 +61,8 @@ func (c *Cluster) NetworkGetInterface(devName string) (int64, *api.Network, erro
 	name := ""
 	value := ""
 
-	q := "SELECT networks.id, networks.name, networks_config.value FROM networks LEFT JOIN networks_config ON networks.id=networks_config.network_id WHERE networks_config.key=\"bridge.external_interfaces\""
-	arg1 := []interface{}{}
+	q := "SELECT networks.id, networks.name, networks_config.value FROM networks LEFT JOIN networks_config ON networks.id=networks_config.network_id WHERE networks_config.key=\"bridge.external_interfaces\" AND networks_config.node_id=?"
+	arg1 := []interface{}{c.id}
 	arg2 := []interface{}{id, name, value}
 	result, err := queryScan(c.db, q, arg1, arg2)
 	if err != nil {
@@ -105,8 +105,9 @@ func (c *Cluster) NetworkConfigGet(id int64) (map[string]string, error) {
         SELECT
             key, value
         FROM networks_config
-		WHERE network_id=?`
-	inargs := []interface{}{id}
+		WHERE network_id=?
+                AND node_id=?`
+	inargs := []interface{}{id, c.id}
 	outfmt := []interface{}{key, value}
 	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
@@ -160,7 +161,7 @@ func (c *Cluster) NetworkCreate(name, description string, config map[string]stri
 		return -1, err
 	}
 
-	err = NetworkConfigAdd(tx, id, config)
+	err = NetworkConfigAdd(tx, id, c.id, config)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -191,13 +192,13 @@ func (c *Cluster) NetworkUpdate(name, description string, config map[string]stri
 		return err
 	}
 
-	err = NetworkConfigClear(tx, id)
+	err = NetworkConfigClear(tx, id, c.id)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = NetworkConfigAdd(tx, id, config)
+	err = NetworkConfigAdd(tx, id, c.id, config)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -211,8 +212,8 @@ func NetworkUpdateDescription(tx *sql.Tx, id int64, description string) error {
 	return err
 }
 
-func NetworkConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
-	str := fmt.Sprintf("INSERT INTO networks_config (network_id, key, value) VALUES(?, ?, ?)")
+func NetworkConfigAdd(tx *sql.Tx, networkID, nodeID int64, config map[string]string) error {
+	str := fmt.Sprintf("INSERT INTO networks_config (network_id, node_id, key, value) VALUES(?, ?, ?, ?)")
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
 	if err != nil {
@@ -224,7 +225,7 @@ func NetworkConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
 			continue
 		}
 
-		_, err = stmt.Exec(id, k, v)
+		_, err = stmt.Exec(networkID, nodeID, k, v)
 		if err != nil {
 			return err
 		}
@@ -233,8 +234,10 @@ func NetworkConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
 	return nil
 }
 
-func NetworkConfigClear(tx *sql.Tx, id int64) error {
-	_, err := tx.Exec("DELETE FROM networks_config WHERE network_id=?", id)
+func NetworkConfigClear(tx *sql.Tx, networkID, nodeID int64) error {
+	_, err := tx.Exec(
+		"DELETE FROM networks_config WHERE network_id=? AND node_id=?",
+		networkID, nodeID)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 96fd70bf7..506eb44a6 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -64,6 +64,22 @@ func (c *ClusterTx) NodeAdd(name string, address string) (int64, error) {
 	return query.UpsertObject(c.tx, "nodes", columns, values)
 }
 
+// NodeUpdate updates the name and address of a node.
+func (c *ClusterTx) NodeUpdate(id int64, name string, address string) error {
+	result, err := c.tx.Exec("UPDATE nodes SET name=?, address=? WHERE id=?", name, address, id)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return fmt.Errorf("query updated %d rows instead of 1", n)
+	}
+	return nil
+}
+
 // NodeHeartbeat updates the heartbeat column of the node with the given address.
 func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
 	stmt := "UPDATE nodes SET heartbeat=? WHERE address=?"
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 2dbdf0efc..c213580ae 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -18,13 +18,13 @@ func TestNodeAdd(t *testing.T) {
 
 	id, err := tx.NodeAdd("buzz", "1.2.3.4:666")
 	require.NoError(t, err)
-	assert.Equal(t, int64(1), id)
+	assert.Equal(t, int64(2), id)
 
 	nodes, err := tx.Nodes()
 	require.NoError(t, err)
-	require.Len(t, nodes, 1)
+	require.Len(t, nodes, 2)
 
-	node := nodes[0]
+	node := nodes[1]
 	assert.Equal(t, "buzz", node.Name)
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)
@@ -45,8 +45,8 @@ func TestNodeHeartbeat(t *testing.T) {
 
 	nodes, err := tx.Nodes()
 	require.NoError(t, err)
-	require.Len(t, nodes, 1)
+	require.Len(t, nodes, 2)
 
-	node := nodes[0]
+	node := nodes[1]
 	assert.True(t, node.IsDown())
 }

From cfa6406c86e58c12e6c573c1d35f4b8482101220 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 24 Oct 2017 19:32:49 +0000
Subject: [PATCH 054/116] Update networks_config table with joining node's data

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/heartbeat_test.go  |  9 ++++++-
 lxd/cluster/membership.go      | 37 +++++++++++++++++++++++++---
 lxd/cluster/membership_test.go | 29 ++++++++++++++--------
 lxd/db/cluster/schema.go       |  6 ++---
 lxd/db/cluster/update.go       |  6 ++---
 lxd/db/db.go                   | 14 +++++++++--
 lxd/db/networks.go             | 56 +++++++++++++++++++++++++++++++++++++++---
 lxd/db/node.go                 | 36 ++++++++++++++++++++++-----
 lxd/db/node_test.go            |  2 +-
 9 files changed, 163 insertions(+), 32 deletions(-)

diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index d129264d5..1e78496f3 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -172,7 +172,6 @@ func (f *heartbeatFixture) node() (*state.State, *cluster.Gateway, string) {
 
 	mux := http.NewServeMux()
 	server := newServer(cert, mux)
-	f.cleanups = append(f.cleanups, server.Close)
 
 	for path, handler := range gateway.HandlerFuncs() {
 		mux.HandleFunc(path, handler)
@@ -182,6 +181,11 @@ func (f *heartbeatFixture) node() (*state.State, *cluster.Gateway, string) {
 	mf := &membershipFixtures{t: f.t, state: state}
 	mf.NetworkAddress(address)
 
+	var err error
+	require.NoError(f.t, state.Cluster.Close())
+	state.Cluster, err = db.OpenCluster("db.bin", gateway.Dialer(), address)
+	require.NoError(f.t, err)
+
 	f.gateways[len(f.gateways)] = gateway
 	f.states[gateway] = state
 	f.servers[gateway] = server
@@ -194,4 +198,7 @@ func (f *heartbeatFixture) Cleanup() {
 	for i := len(f.cleanups) - 1; i >= 0; i-- {
 		f.cleanups[i]()
 	}
+	for _, server := range f.servers {
+		server.Close()
+	}
 }
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index d635ad9c0..e763f110a 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -228,6 +228,18 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		return err
 	}
 
+	// Get the local config keys for the cluster networks. This assumes that
+	// the local networks match the cluster networks; if they do not, an
+	// error will be returned.
+	var networks map[string]map[string]string
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		networks, err = tx.NetworkConfigs()
+		return err
+	})
+	if err != nil {
+		return err
+	}
+
 	// Shutdown the gateway and wipe any raft data. This will trash any
 	// gRPC SQL connection against our in-memory dqlite driver and shutdown
 	// the associated raft instance.
@@ -273,10 +285,29 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	// Make sure we can actually connect to the cluster database through
 	// the network endpoint. This also makes the Go SQL pooling system
 	// invalidate the old connection, so new queries will be executed over
-	// the new gRPC network connection.
+	// the new gRPC network connection. Also, update the networks table
+	// with our local configuration.
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		_, err := tx.Nodes()
-		return err
+		node, err := tx.Node(address)
+		if err != nil {
+			return errors.Wrap(err, "failed to get ID of joining node")
+		}
+		state.Cluster.ID(node.ID)
+		ids, err := tx.NetworkIDs()
+		if err != nil {
+			return errors.Wrap(err, "failed to get cluster network IDs")
+		}
+		for name, id := range ids {
+			config, ok := networks[name]
+			if !ok {
+				return fmt.Errorf("joining node has no config for network %s", name)
+			}
+			err := tx.NetworkConfigAdd(id, node.ID, config)
+			if err != nil {
+				return errors.Wrap(err, "failed to add joining node's network config")
+			}
+		}
+		return nil
 	})
 	if err != nil {
 		return errors.Wrap(err, "cluster database initialization failed")
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 83b8a5576..fd4489ac0 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -242,45 +242,54 @@ func TestAccept_MaxRaftNodes(t *testing.T) {
 
 func TestJoin(t *testing.T) {
 	// Setup a target node running as leader of a cluster.
+	targetCert := shared.TestingKeyPair()
+	targetMux := http.NewServeMux()
+	targetServer := newServer(targetCert, targetMux)
+	defer targetServer.Close()
+
 	targetState, cleanup := state.NewTestState(t)
 	defer cleanup()
 
-	targetCert := shared.TestingKeyPair()
 	targetGateway := newGateway(t, targetState.Node, targetCert)
 	defer targetGateway.Shutdown()
 
-	targetMux := http.NewServeMux()
-	targetServer := newServer(targetCert, targetMux)
-	defer targetServer.Close()
-
 	for path, handler := range targetGateway.HandlerFuncs() {
 		targetMux.HandleFunc(path, handler)
 	}
 
 	targetAddress := targetServer.Listener.Addr().String()
+	var err error
+	require.NoError(t, targetState.Cluster.Close())
+	targetState.Cluster, err = db.OpenCluster("db.bin", targetGateway.Dialer(), targetAddress)
+	require.NoError(t, err)
 	targetF := &membershipFixtures{t: t, state: targetState}
 	targetF.NetworkAddress(targetAddress)
 
-	err := cluster.Bootstrap(targetState, targetGateway, "buzz")
+	err = cluster.Bootstrap(targetState, targetGateway, "buzz")
 	require.NoError(t, err)
 
 	// Setup a joining node
+	mux := http.NewServeMux()
+	server := newServer(targetCert, mux)
+	defer server.Close()
+
 	state, cleanup := state.NewTestState(t)
 	defer cleanup()
 
 	cert := shared.TestingAltKeyPair()
 	gateway := newGateway(t, state.Node, cert)
-	defer gateway.Shutdown()
 
-	mux := http.NewServeMux()
-	server := newServer(cert, mux)
-	defer server.Close()
+	defer gateway.Shutdown()
 
 	for path, handler := range gateway.HandlerFuncs() {
 		mux.HandleFunc(path, handler)
 	}
 
 	address := server.Listener.Addr().String()
+	require.NoError(t, state.Cluster.Close())
+	state.Cluster, err = db.OpenCluster("db.bin", gateway.Dialer(), address)
+	require.NoError(t, err)
+
 	f := &membershipFixtures{t: t, state: state}
 	f.NetworkAddress(address)
 
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index c6f3f647f..015509b66 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -8,13 +8,13 @@ package cluster
 const freshSchema = `
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    key VARCHAR(255) NOT NULL,
+    key TEXT NOT NULL,
     value TEXT,
     UNIQUE (key)
 );
 CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
+    name TEXT NOT NULL,
     description TEXT,
     UNIQUE (name)
 );
@@ -22,7 +22,7 @@ CREATE TABLE networks_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     network_id INTEGER NOT NULL,
     node_id INTEGER,
-    key VARCHAR(255) NOT NULL,
+    key TEXT NOT NULL,
     value TEXT,
     UNIQUE (network_id, node_id, key),
     FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 4bf1df4ae..8af0b8d0f 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -32,13 +32,13 @@ func updateFromV1(tx *sql.Tx) error {
 	stmt := `
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    key VARCHAR(255) NOT NULL,
+    key TEXT NOT NULL,
     value TEXT,
     UNIQUE (key)
 );
 CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
+    name TEXT NOT NULL,
     description TEXT,
     UNIQUE (name)
 );
@@ -46,7 +46,7 @@ CREATE TABLE networks_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     network_id INTEGER NOT NULL,
     node_id INTEGER,
-    key VARCHAR(255) NOT NULL,
+    key TEXT NOT NULL,
     value TEXT,
     UNIQUE (network_id, node_id, key),
     FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 985e7b2f2..4f3a95a34 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -183,7 +183,7 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 		}
 		if len(nodes) == 1 && nodes[0].Address == "0.0.0.0" {
 			// We're not clustered
-			cluster.id = 1
+			cluster.ID(1)
 			return nil
 		}
 		for _, node := range nodes {
@@ -218,7 +218,9 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 		if err != nil {
 			// FIXME: we should bubble errors using errors.Wrap()
 			// instead, and check for sql.ErrBadConnection.
-			if strings.Contains(err.Error(), "bad connection") {
+			badConnection := strings.Contains(err.Error(), "bad connection")
+			leadershipLost := strings.Contains(err.Error(), "leadership lost")
+			if badConnection || leadershipLost {
 				logger.Debugf("Retry failed transaction")
 				time.Sleep(time.Second)
 				continue
@@ -229,6 +231,14 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 	return err
 }
 
+// ID sets the node ID associated with this cluster instance. It's used for
+// backward-compatibility of all db-related APIs that were written before
+// clustering and don't accept a node ID, so in those cases we automatically
+// use this value as the implicit node ID.
+func (c *Cluster) ID(id int64) {
+	c.id = id
+}
+
 // Close the database facade.
 func (c *Cluster) Close() error {
 	return c.db.Close()
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index a2cae8a15..193950b2f 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -7,9 +7,59 @@ import (
 
 	_ "github.com/mattn/go-sqlite3"
 
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/api"
 )
 
+// NetworkConfigs returns a map associating each network name to its config
+// values.
+func (c *ClusterTx) NetworkConfigs() (map[string]map[string]string, error) {
+	names, err := query.SelectStrings(c.tx, "SELECT name FROM networks")
+	if err != nil {
+		return nil, err
+	}
+	networks := make(map[string]map[string]string, len(names))
+	for _, name := range names {
+		table := "networks_config JOIN networks ON networks.id=networks_config.network_id"
+		config, err := query.SelectConfig(c.tx, table, fmt.Sprintf("networks.name='%s'", name))
+		if err != nil {
+			return nil, err
+		}
+		networks[name] = config
+	}
+	return networks, nil
+}
+
+// NetworkIDs returns a map associating each network name to its ID.
+func (c *ClusterTx) NetworkIDs() (map[string]int64, error) {
+	networks := []struct {
+		id   int64
+		name string
+	}{}
+	dest := func(i int) []interface{} {
+		networks = append(networks, struct {
+			id   int64
+			name string
+		}{})
+		return []interface{}{&networks[i].id, &networks[i].name}
+
+	}
+	err := query.SelectObjects(c.tx, dest, "SELECT id, name FROM networks")
+	if err != nil {
+		return nil, err
+	}
+	ids := map[string]int64{}
+	for _, network := range networks {
+		ids[network.name] = network.id
+	}
+	return ids, nil
+}
+
+// NetworkConfigAdd adds a new entry in the networks_config table
+func (c *ClusterTx) NetworkConfigAdd(networkID, nodeID int64, config map[string]string) error {
+	return networkConfigAdd(c.tx, networkID, nodeID, config)
+}
+
 func (c *Cluster) Networks() ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM networks")
 	inargs := []interface{}{}
@@ -161,7 +211,7 @@ func (c *Cluster) NetworkCreate(name, description string, config map[string]stri
 		return -1, err
 	}
 
-	err = NetworkConfigAdd(tx, id, c.id, config)
+	err = networkConfigAdd(tx, id, c.id, config)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -198,7 +248,7 @@ func (c *Cluster) NetworkUpdate(name, description string, config map[string]stri
 		return err
 	}
 
-	err = NetworkConfigAdd(tx, id, c.id, config)
+	err = networkConfigAdd(tx, id, c.id, config)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -212,7 +262,7 @@ func NetworkUpdateDescription(tx *sql.Tx, id int64, description string) error {
 	return err
 }
 
-func NetworkConfigAdd(tx *sql.Tx, networkID, nodeID int64, config map[string]string) error {
+func networkConfigAdd(tx *sql.Tx, networkID, nodeID int64, config map[string]string) error {
 	str := fmt.Sprintf("INSERT INTO networks_config (network_id, node_id, key, value) VALUES(?, ?, ?, ?)")
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 506eb44a6..e029d1b31 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -27,10 +27,33 @@ func (n NodeInfo) IsDown() bool {
 	return n.Heartbeat.Before(time.Now().Add(-20 * time.Second))
 }
 
+// Node returns the node with the given network address.
+func (c *ClusterTx) Node(address string) (NodeInfo, error) {
+	null := NodeInfo{}
+	nodes, err := c.nodes("address=?", address)
+	if err != nil {
+		return null, err
+	}
+	switch len(nodes) {
+	case 0:
+		return null, NoSuchObjectError
+	case 1:
+		return nodes[0], nil
+	default:
+		return null, fmt.Errorf("more than one node matches")
+	}
+}
+
 // Nodes returns all LXD nodes part of the cluster.
 //
-// If this LXD instance is not clustered, an empty list is returned.
+// If this LXD instance is not clustered, a list with a single node whose
+// address is 0.0.0.0 is returned.
 func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
+	return c.nodes("")
+}
+
+// nodes returns the LXD nodes in the cluster matching the given WHERE filter.
+func (c *ClusterTx) nodes(where string, args ...interface{}) ([]NodeInfo, error) {
 	nodes := []NodeInfo{}
 	dest := func(i int) []interface{} {
 		nodes = append(nodes, NodeInfo{})
@@ -45,11 +68,12 @@ func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
 		}
 	}
 	stmt := `
-SELECT id, name, address, description, schema, api_extensions, heartbeat
-  FROM nodes
-    ORDER BY id
-`
-	err := query.SelectObjects(c.tx, dest, stmt)
+SELECT id, name, address, description, schema, api_extensions, heartbeat FROM nodes `
+	if where != "" {
+		stmt += fmt.Sprintf("WHERE %s ", where)
+	}
+	stmt += "ORDER BY id"
+	err := query.SelectObjects(c.tx, dest, stmt, args...)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to fecth nodes")
 	}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index c213580ae..f59a08d1f 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -24,7 +24,7 @@ func TestNodeAdd(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, nodes, 2)
 
-	node := nodes[1]
+	node, err := tx.Node("1.2.3.4:666")
 	assert.Equal(t, "buzz", node.Name)
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)

From 31cef3ac3470a852656f52f8418a2de5258136ee Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 24 Oct 2017 20:07:23 +0000
Subject: [PATCH 055/116] Add storage-related tables to cluster db

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      | 36 +++++++++++++++++++++++++++++++
 lxd/db/cluster/update.go      | 36 +++++++++++++++++++++++++++++++
 lxd/db/cluster/update_test.go | 50 +++++++++++++++++++++++++++++++------------
 3 files changed, 108 insertions(+), 14 deletions(-)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 015509b66..f6a8c1e1e 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -39,6 +39,42 @@ CREATE TABLE nodes (
     UNIQUE (name),
     UNIQUE (address)
 );
+CREATE TABLE storage_pools (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    driver TEXT NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE storage_pools_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_pool_id, node_id, key),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    description TEXT,
+    UNIQUE (storage_pool_id, name, type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_volume_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_volume_id, node_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 
 INSERT INTO schema (version, updated_at) VALUES (2, strftime("%s"))
 `
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 8af0b8d0f..f63d88533 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -52,6 +52,42 @@ CREATE TABLE networks_config (
     FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
+CREATE TABLE storage_pools (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    driver TEXT NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE storage_pools_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_pool_id, node_id, key),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    description TEXT,
+    UNIQUE (storage_pool_id, name, type),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
+);
+CREATE TABLE storage_volumes_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_volume_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (storage_volume_id, node_id, key),
+    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 `
 	_, err := tx.Exec(stmt)
 	return err
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index a0bfa9eb6..7c65fa9da 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -1,6 +1,8 @@
 package cluster_test
 
 import (
+	"database/sql"
+	"fmt"
 	"testing"
 	"time"
 
@@ -55,7 +57,24 @@ func TestUpdateFromV1_Network(t *testing.T) {
 	require.Error(t, err)
 }
 
-func TestUpdateFromV1_NetworkConfig(t *testing.T) {
+func TestUpdateFromV1_ConfigTables(t *testing.T) {
+	testConfigTable(t, "networks", func(db *sql.DB) {
+		_, err := db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah')")
+		require.NoError(t, err)
+	})
+	testConfigTable(t, "storage_pools", func(db *sql.DB) {
+		_, err := db.Exec("INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')")
+		require.NoError(t, err)
+	})
+	testConfigTable(t, "storage_volumes", func(db *sql.DB) {
+		_, err := db.Exec("INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')")
+		require.NoError(t, err)
+		_, err = db.Exec("INSERT INTO storage_volumes VALUES (1, 'dev', 1, 1, '')")
+		require.NoError(t, err)
+	})
+}
+
+func testConfigTable(t *testing.T, table string, setup func(db *sql.DB)) {
 	schema := cluster.Schema()
 	db, err := schema.ExerciseUpdate(2, nil)
 	require.NoError(t, err)
@@ -66,24 +85,27 @@ func TestUpdateFromV1_NetworkConfig(t *testing.T) {
 	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'two', '', '2.2.2.2', 666, 999, ?)", time.Now())
 	require.NoError(t, err)
 
-	_, err = db.Exec("INSERT INTO networks VALUES (1, 'foo', 'blah')")
-	require.NoError(t, err)
+	stmt := func(format string) string {
+		return fmt.Sprintf(format, table)
+	}
+
+	setup(db)
 
-	_, err = db.Exec("INSERT INTO networks_config VALUES (1, 1, 1, 'bar', 'baz')")
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (1, 1, 1, 'bar', 'baz')"))
 	require.NoError(t, err)
 
-	// Unique constraint on network_id/node_id/key.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (2, 1, 1, 'bar', 'egg')")
+	// Unique constraint on <entity>_id/node_id/key.
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (2, 1, 1, 'bar', 'egg')"))
 	require.Error(t, err)
-	_, err = db.Exec("INSERT INTO networks_config VALUES (3, 1, 2, 'bar', 'egg')")
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (3, 1, 2, 'bar', 'egg')"))
 	require.NoError(t, err)
 
-	// Reference constraint on network_id.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (4, 2, 1, 'fuz', 'buz')")
+	// Reference constraint on <entity>_id.
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (4, 2, 1, 'fuz', 'buz')"))
 	require.Error(t, err)
 
 	// Reference constraint on node_id.
-	_, err = db.Exec("INSERT INTO networks_config VALUES (5, 1, 3, 'fuz', 'buz')")
+	_, err = db.Exec(stmt("INSERT INTO %s_config VALUES (5, 1, 3, 'fuz', 'buz')"))
 	require.Error(t, err)
 
 	// Cascade deletes on node_id
@@ -92,19 +114,19 @@ func TestUpdateFromV1_NetworkConfig(t *testing.T) {
 	n, err := result.RowsAffected()
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), n)
-	result, err = db.Exec("UPDATE networks_config SET value='yuk'")
+	result, err = db.Exec(stmt("UPDATE %s_config SET value='yuk'"))
 	require.NoError(t, err)
 	n, err = result.RowsAffected()
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), n) // Only one row was affected, since the other got deleted
 
-	// Cascade deletes on network_id
-	result, err = db.Exec("DELETE FROM networks")
+	// Cascade deletes on <entity>_id
+	result, err = db.Exec(stmt("DELETE FROM %s"))
 	require.NoError(t, err)
 	n, err = result.RowsAffected()
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), n)
-	result, err = db.Exec("DELETE FROM networks_config")
+	result, err = db.Exec(stmt("DELETE FROM %s_config"))
 	require.NoError(t, err)
 	n, err = result.RowsAffected()
 	require.NoError(t, err)

From b25ed67c9fc2915c13166ae53d2f14eb8ff4f879 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 26 Oct 2017 09:47:49 +0000
Subject: [PATCH 056/116] Add internal "lxd sql" command to run arbitrary SQL
 queries

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go      | 69 ++++++++++++++++++++++++++++++++++++++++
 lxd/db/db.go             |  8 +++++
 lxd/db/db_export_test.go |  9 ------
 lxd/main.go              |  1 +
 lxd/main_sql.go          | 83 ++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 161 insertions(+), 9 deletions(-)
 delete mode 100644 lxd/db/db_export_test.go
 create mode 100644 lxd/main_sql.go

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index b5a568d8d..eaea3a906 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -28,6 +28,7 @@ var apiInternal = []Command{
 	internalContainerOnStartCmd,
 	internalContainerOnStopCmd,
 	internalContainersCmd,
+	internalSQLCmd,
 }
 
 func internalReady(d *Daemon, r *http.Request) Response {
@@ -91,10 +92,78 @@ func internalContainerOnStop(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
+type internalSQLPost struct {
+	Query string `json:"query" yaml:"query"`
+}
+
+type internalSQLResult struct {
+	Columns      []string        `json:"columns" yaml:"columns"`
+	Rows         [][]interface{} `json:"rows" yaml:"rows"`
+	RowsAffected int64           `json:"rows_affected" yaml:"rows_affected"`
+}
+
+func internalSQL(d *Daemon, r *http.Request) Response {
+	req := &internalSQLPost{}
+	// Parse the request.
+	err := json.NewDecoder(r.Body).Decode(&req)
+	if err != nil {
+		return BadRequest(err)
+	}
+	db := d.cluster.DB()
+	result := internalSQLResult{}
+	if strings.HasPrefix(req.Query, "SELECT") {
+		rows, err := db.Query(req.Query)
+		if err != nil {
+			return SmartError(err)
+		}
+		defer rows.Close()
+		result.Columns, err = rows.Columns()
+		if err != nil {
+			return SmartError(err)
+		}
+		for rows.Next() {
+			row := make([]interface{}, len(result.Columns))
+			rowPointers := make([]interface{}, len(result.Columns))
+			for i := range row {
+				rowPointers[i] = &row[i]
+			}
+			err := rows.Scan(rowPointers...)
+			if err != nil {
+				return SmartError(err)
+			}
+			for i, column := range row {
+				// Convert bytes to string. This is safe as
+				// long as we don't have any BLOB column type.
+				data, ok := column.([]byte)
+				if ok {
+					row[i] = string(data)
+				}
+			}
+			result.Rows = append(result.Rows, row)
+		}
+		err = rows.Err()
+		if err != nil {
+			return SmartError(err)
+		}
+	} else {
+		r, err := db.Exec(req.Query)
+		if err != nil {
+			return SmartError(err)
+		}
+		result.RowsAffected, err = r.RowsAffected()
+		if err != nil {
+			return SmartError(err)
+		}
+
+	}
+	return SyncResponse(true, result)
+}
+
 var internalShutdownCmd = Command{name: "shutdown", put: internalShutdown}
 var internalReadyCmd = Command{name: "ready", put: internalReady, get: internalWaitReady}
 var internalContainerOnStartCmd = Command{name: "containers/{id}/onstart", get: internalContainerOnStart}
 var internalContainerOnStopCmd = Command{name: "containers/{id}/onstop", get: internalContainerOnStop}
+var internalSQLCmd = Command{name: "sql", post: internalSQL}
 
 func slurpBackupFile(path string) (*backupFile, error) {
 	data, err := ioutil.ReadFile(path)
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 4f3a95a34..76e20ed10 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -244,6 +244,14 @@ func (c *Cluster) Close() error {
 	return c.db.Close()
 }
 
+// DB returns the low level database handle to the cluster database.
+//
+// FIXME: this is used for compatibility with some legacy code, and should be
+//        dropped once there are no call sites left.
+func (c *Cluster) DB() *sql.DB {
+	return c.db
+}
+
 // UpdateSchemasDotGo updates the schema.go files in the local/ and cluster/
 // sub-packages.
 func UpdateSchemasDotGo() error {
diff --git a/lxd/db/db_export_test.go b/lxd/db/db_export_test.go
deleted file mode 100644
index a975c9081..000000000
--- a/lxd/db/db_export_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package db
-
-import "database/sql"
-
-// DB returns the low level database handle to the cluster gRPC SQL database
-// handler. Used by tests for introspecting the database with raw SQL.
-func (c *Cluster) DB() *sql.DB {
-	return c.db
-}
diff --git a/lxd/main.go b/lxd/main.go
index 35df502be..48429830f 100644
--- a/lxd/main.go
+++ b/lxd/main.go
@@ -65,4 +65,5 @@ var subcommands = map[string]SubCommand{
 	"forkexec":           cmdForkExec,
 	"netcat":             cmdNetcat,
 	"migratedumpsuccess": cmdMigrateDumpSuccess,
+	"sql":                cmdSQL,
 }
diff --git a/lxd/main_sql.go b/lxd/main_sql.go
new file mode 100644
index 000000000..e721633bb
--- /dev/null
+++ b/lxd/main_sql.go
@@ -0,0 +1,83 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	lxd "github.com/lxc/lxd/client"
+)
+
+func cmdSQL(args *Args) error {
+	if len(args.Params) != 1 {
+		return fmt.Errorf("Invalid arguments")
+	}
+	query := args.Params[0]
+
+	// Connect to LXD
+	c, err := lxd.ConnectLXDUnix("", nil)
+	if err != nil {
+		return err
+	}
+
+	data := internalSQLPost{
+		Query: query,
+	}
+	response, _, err := c.RawQuery("POST", "/internal/sql", data, "")
+	if err != nil {
+		return err
+	}
+
+	result := internalSQLResult{}
+	err = json.Unmarshal(response.Metadata, &result)
+	if err != nil {
+		return err
+	}
+	if strings.HasPrefix(query, "SELECT") {
+		// Print results in tabular format
+		widths := make([]int, len(result.Columns))
+		for i, column := range result.Columns {
+			widths[i] = len(column)
+		}
+		for _, row := range result.Rows {
+			for i, v := range row {
+				width := 10
+				switch v := v.(type) {
+				case string:
+					width = len(v)
+				case int:
+					width = 6
+				case int64:
+					width = 6
+				case time.Time:
+					width = 12
+				}
+				if width > widths[i] {
+					widths[i] = width
+				}
+			}
+		}
+		format := "|"
+		separator := "+"
+		columns := make([]interface{}, len(result.Columns))
+		for i, column := range result.Columns {
+			format += " %-" + strconv.Itoa(widths[i]) + "v |"
+			columns[i] = column
+			separator += strings.Repeat("-", widths[i]+2) + "+"
+		}
+		format += "\n"
+		separator += "\n"
+		fmt.Printf(separator)
+		fmt.Printf(fmt.Sprintf(format, columns...))
+		fmt.Printf(separator)
+		for _, row := range result.Rows {
+			fmt.Printf(format, row...)
+		}
+		fmt.Printf(separator)
+	} else {
+		fmt.Printf("Rows affected: %d\n", result.RowsAffected)
+	}
+	return nil
+}

From e345d55e1fb1746ff4052446b37c20a9c6f876d1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 07:18:49 +0000
Subject: [PATCH 057/116] Use cluster database for storage-related data

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go            |  12 +--
 lxd/container.go               |   4 +-
 lxd/container_lxc.go           |  24 +++---
 lxd/containers_post.go         |   6 +-
 lxd/daemon_images.go           |   4 +-
 lxd/db/containers.go           |   4 +-
 lxd/db/images.go               |   4 +-
 lxd/db/migration.go            |  31 ++++++-
 lxd/db/migration_test.go       |  19 +++++
 lxd/db/node/schema.go          |  32 --------
 lxd/db/node/update.go          |   4 +
 lxd/db/storage_pools.go        | 122 ++++++++++++++--------------
 lxd/db/storage_volumes.go      |  41 ++++++----
 lxd/images.go                  |   6 +-
 lxd/main_test.go               |   2 +-
 lxd/patches.go                 | 178 +++++++++++++++++++++--------------------
 lxd/profiles.go                |   2 +-
 lxd/profiles_utils.go          |   2 +-
 lxd/storage.go                 |  20 ++---
 lxd/storage_btrfs.go           |   4 +-
 lxd/storage_ceph.go            |   8 +-
 lxd/storage_dir.go             |   4 +-
 lxd/storage_lvm.go             |   8 +-
 lxd/storage_lvm_utils.go       |   2 +-
 lxd/storage_pools.go           |  20 ++---
 lxd/storage_pools_utils.go     |  20 ++---
 lxd/storage_shared.go          |   8 +-
 lxd/storage_utils.go           |   2 +-
 lxd/storage_volumes.go         |  26 +++---
 lxd/storage_volumes_utils.go   |  12 +--
 lxd/storage_zfs.go             |   4 +-
 test/includes/lxd.sh           |   8 +-
 test/suites/backup.sh          |  10 +--
 test/suites/database_update.sh |   6 +-
 34 files changed, 344 insertions(+), 315 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index eaea3a906..07d8e9ec3 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -257,7 +257,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 
 	// Try to retrieve the storage pool the container supposedly lives on.
 	var poolErr error
-	poolID, pool, poolErr := d.db.StoragePoolGet(containerPoolName)
+	poolID, pool, poolErr := d.cluster.StoragePoolGet(containerPoolName)
 	if poolErr != nil {
 		if poolErr != db.NoSuchObjectError {
 			return SmartError(poolErr)
@@ -279,7 +279,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 			return SmartError(err)
 		}
 
-		poolID, err = d.db.StoragePoolGetID(containerPoolName)
+		poolID, err = d.cluster.StoragePoolGetID(containerPoolName)
 		if err != nil {
 			return SmartError(err)
 		}
@@ -574,7 +574,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check if a storage volume entry for the container already exists.
-	_, volume, ctVolErr := d.db.StoragePoolVolumeGetType(
+	_, volume, ctVolErr := d.cluster.StoragePoolVolumeGetType(
 		req.Name, storagePoolVolumeTypeContainer, poolID)
 	if ctVolErr != nil {
 		if ctVolErr != db.NoSuchObjectError {
@@ -624,7 +624,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 
 		// Remove the storage volume db entry for the container since
 		// force was specified.
-		err := d.db.StoragePoolVolumeDelete(req.Name,
+		err := d.cluster.StoragePoolVolumeDelete(req.Name,
 			storagePoolVolumeTypeContainer, poolID)
 		if err != nil {
 			return SmartError(err)
@@ -657,7 +657,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		}
 
 		// Check if a storage volume entry for the snapshot already exists.
-		_, _, csVolErr := d.db.StoragePoolVolumeGetType(snap.Name,
+		_, _, csVolErr := d.cluster.StoragePoolVolumeGetType(snap.Name,
 			storagePoolVolumeTypeContainer, poolID)
 		if csVolErr != nil {
 			if csVolErr != db.NoSuchObjectError {
@@ -680,7 +680,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		}
 
 		if csVolErr == nil {
-			err := d.db.StoragePoolVolumeDelete(snap.Name,
+			err := d.cluster.StoragePoolVolumeDelete(snap.Name,
 				storagePoolVolumeTypeContainer, poolID)
 			if err != nil {
 				return SmartError(err)
diff --git a/lxd/container.go b/lxd/container.go
index fb509df76..708d30552 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -292,7 +292,7 @@ func containerGetRootDiskDevice(devices types.Devices) (string, types.Device, er
 	return "", types.Device{}, fmt.Errorf("No root device could be found.")
 }
 
-func containerValidDevices(db *db.Node, devices types.Devices, profile bool, expanded bool) error {
+func containerValidDevices(db *db.Cluster, devices types.Devices, profile bool, expanded bool) error {
 	// Empty device list
 	if devices == nil {
 		return nil
@@ -813,7 +813,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Validate container devices
-	err = containerValidDevices(s.Node, args.Devices, false, false)
+	err = containerValidDevices(s.Cluster, args.Devices, false, false)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 7ec0dc567..d121df541 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -310,7 +310,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 		return nil, err
 	}
 
-	err = containerValidDevices(s.Node, c.expandedDevices, false, true)
+	err = containerValidDevices(s.Cluster, c.expandedDevices, false, true)
 	if err != nil {
 		c.Delete()
 		logger.Error("Failed creating container", ctxMap)
@@ -332,7 +332,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	storagePool := rootDiskDevice["pool"]
 
 	// Get the storage pool ID for the container
-	poolID, pool, err := s.Node.StoragePoolGet(storagePool)
+	poolID, pool, err := s.Cluster.StoragePoolGet(storagePool)
 	if err != nil {
 		c.Delete()
 		return nil, err
@@ -346,7 +346,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	}
 
 	// Create a new database entry for the container's storage volume
-	_, err = s.Node.StoragePoolVolumeCreate(args.Name, "", storagePoolVolumeTypeContainer, poolID, volumeConfig)
+	_, err = s.Cluster.StoragePoolVolumeCreate(args.Name, "", storagePoolVolumeTypeContainer, poolID, volumeConfig)
 	if err != nil {
 		c.Delete()
 		return nil, err
@@ -356,7 +356,7 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	cStorage, err := storagePoolVolumeContainerCreateInit(s, storagePool, args.Name)
 	if err != nil {
 		c.Delete()
-		s.Node.StoragePoolVolumeDelete(args.Name, storagePoolVolumeTypeContainer, poolID)
+		s.Cluster.StoragePoolVolumeDelete(args.Name, storagePoolVolumeTypeContainer, poolID)
 		logger.Error("Failed to initialize container storage", ctxMap)
 		return nil, err
 	}
@@ -3109,7 +3109,7 @@ func (c *containerLXC) Delete() error {
 		poolID, _, _ := c.storage.GetContainerPoolInfo()
 
 		// Remove volume from storage pool.
-		err := c.db.StoragePoolVolumeDelete(c.Name(), storagePoolVolumeTypeContainer, poolID)
+		err := c.state.Cluster.StoragePoolVolumeDelete(c.Name(), storagePoolVolumeTypeContainer, poolID)
 		if err != nil {
 			return err
 		}
@@ -3197,7 +3197,7 @@ func (c *containerLXC) Rename(newName string) error {
 
 	// Rename storage volume for the container.
 	poolID, _, _ := c.storage.GetContainerPoolInfo()
-	err = c.db.StoragePoolVolumeRename(oldName, newName, storagePoolVolumeTypeContainer, poolID)
+	err = c.state.Cluster.StoragePoolVolumeRename(oldName, newName, storagePoolVolumeTypeContainer, poolID)
 	if err != nil {
 		logger.Error("Failed renaming storage volume", ctxMap)
 		return err
@@ -3222,7 +3222,7 @@ func (c *containerLXC) Rename(newName string) error {
 			}
 
 			// Rename storage volume for the snapshot.
-			err = c.db.StoragePoolVolumeRename(sname, newSnapshotName, storagePoolVolumeTypeContainer, poolID)
+			err = c.state.Cluster.StoragePoolVolumeRename(sname, newSnapshotName, storagePoolVolumeTypeContainer, poolID)
 			if err != nil {
 				logger.Error("Failed renaming storage volume", ctxMap)
 				return err
@@ -3350,12 +3350,12 @@ func writeBackupFile(c container) error {
 	}
 
 	s := c.DaemonState()
-	poolID, pool, err := s.Node.StoragePoolGet(poolName)
+	poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
-	_, volume, err := s.Node.StoragePoolVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
+	_, volume, err := s.Cluster.StoragePoolVolumeGetType(c.Name(), storagePoolVolumeTypeContainer, poolID)
 	if err != nil {
 		return err
 	}
@@ -3414,7 +3414,7 @@ func (c *containerLXC) Update(args db.ContainerArgs, userRequested bool) error {
 	}
 
 	// Validate the new devices
-	err = containerValidDevices(c.db, args.Devices, false, false)
+	err = containerValidDevices(c.state.Cluster, args.Devices, false, false)
 	if err != nil {
 		return err
 	}
@@ -3575,7 +3575,7 @@ func (c *containerLXC) Update(args db.ContainerArgs, userRequested bool) error {
 	}
 
 	// Do some validation of the devices diff
-	err = containerValidDevices(c.db, c.expandedDevices, false, true)
+	err = containerValidDevices(c.state.Cluster, c.expandedDevices, false, true)
 	if err != nil {
 		return err
 	}
@@ -7709,7 +7709,7 @@ func (c *containerLXC) StatePath() string {
 }
 
 func (c *containerLXC) StoragePool() (string, error) {
-	poolName, err := c.db.ContainerPool(c.Name())
+	poolName, err := c.state.Cluster.ContainerPool(c.Name())
 	if err != nil {
 		return "", err
 	}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 5ac3e0b34..60e120453 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -206,7 +206,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 
 	// Handle copying/moving between two storage-api LXD instances.
 	if storagePool != "" {
-		_, err := d.db.StoragePoolGetID(storagePool)
+		_, err := d.cluster.StoragePoolGetID(storagePool)
 		if err == db.NoSuchObjectError {
 			storagePool = ""
 			// Unset the local root disk device storage pool if not
@@ -235,7 +235,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 	logger.Debugf("No valid storage pool in the container's local root disk device and profiles found.")
 	// If there is just a single pool in the database, use that
 	if storagePool == "" {
-		pools, err := d.db.StoragePools()
+		pools, err := d.cluster.StoragePools()
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				return BadRequest(fmt.Errorf("This LXD instance does not have any storage pools configured."))
@@ -524,7 +524,7 @@ func containersPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// If no storage pool is found, error out.
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil || len(pools) == 0 {
 		return BadRequest(fmt.Errorf("No storage pool found. Please create a new storage pool."))
 	}
diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index d6f3f9e9c..20a137e27 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -255,13 +255,13 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 		}
 
 		// Get the ID of the target storage pool
-		poolID, err := d.db.StoragePoolGetID(storagePool)
+		poolID, err := d.cluster.StoragePoolGetID(storagePool)
 		if err != nil {
 			return nil, err
 		}
 
 		// Check if the image is already in the pool
-		poolIDs, err := d.db.ImageGetPools(info.Fingerprint)
+		poolIDs, err := d.cluster.ImageGetPools(info.Fingerprint)
 		if err != nil {
 			return nil, err
 		}
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index fa74d13a8..e51df08d3 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -509,7 +509,7 @@ func (n *Node) ContainerNextSnapshot(name string) int {
 }
 
 // Get the storage pool of a given container.
-func (n *Node) ContainerPool(containerName string) (string, error) {
+func (c *Cluster) ContainerPool(containerName string) (string, error) {
 	// Get container storage volume. Since container names are globally
 	// unique, and their storage volumes carry the same name, their storage
 	// volumes are unique too.
@@ -520,7 +520,7 @@ WHERE storage_volumes.name=? AND storage_volumes.type=?`
 	inargs := []interface{}{containerName, StoragePoolVolumeTypeContainer}
 	outargs := []interface{}{&poolName}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return "", NoSuchObjectError
diff --git a/lxd/db/images.go b/lxd/db/images.go
index 7e63f8b07..244d5777d 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -530,13 +530,13 @@ func (n *Node) ImageInsert(fp string, fname string, sz int64, public bool, autoU
 }
 
 // Get the IDs of all storage pools on which a given image exists.
-func (n *Node) ImageGetPools(imageFingerprint string) ([]int64, error) {
+func (c *Cluster) ImageGetPools(imageFingerprint string) ([]int64, error) {
 	poolID := int64(-1)
 	query := "SELECT storage_pool_id FROM storage_volumes WHERE name=? AND type=?"
 	inargs := []interface{}{imageFingerprint, StoragePoolVolumeTypeImage}
 	outargs := []interface{}{poolID}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return []int64{}, err
 	}
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 084c14b85..f9d2a7f64 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -76,10 +76,33 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 				}
 			}
 			columns := dump.Schema[table]
+
+			nullNodeID := false // Whether config-related rows should have a NULL node ID
+			appendNodeID := func() {
+				columns = append(columns, "node_id")
+				if nullNodeID {
+					row = append(row, nil)
+				} else {
+					row = append(row, int64(1))
+				}
+			}
+
 			switch table {
 			case "networks_config":
-				columns = append(columns, "node_id")
-				row = append(row, int64(1))
+				appendNodeID()
+			case "storage_pools_config":
+				// The "source" config key is the only one
+				// which is not global to the cluster, so all
+				// other keys will have a NULL node_id.
+				for i, column := range columns {
+					if column == "key" && row[i] != "source" {
+						nullNodeID = true
+						break
+					}
+				}
+				appendNodeID()
+			case "storage_volumes_config":
+				appendNodeID()
 			}
 			stmt := fmt.Sprintf("INSERT INTO %s(%s)", table, strings.Join(columns, ", "))
 			stmt += fmt.Sprintf(" VALUES %s", query.Params(len(columns)))
@@ -115,4 +138,8 @@ var preClusteringTables = []string{
 	"config",
 	"networks",
 	"networks_config",
+	"storage_pools",
+	"storage_pools_config",
+	"storage_volumes",
+	"storage_volumes_config",
 }
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index ac201ace4..b590907b4 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -58,6 +58,20 @@ func TestImportPreClusteringData(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), id)
 	assert.Equal(t, "true", network.Config["ipv4.nat"])
+
+	// storage
+	pools, err := cluster.StoragePools()
+	require.NoError(t, err)
+	assert.Equal(t, []string{"default"}, pools)
+	id, pool, err := cluster.StoragePoolGet("default")
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+	assert.Equal(t, "/foo/bar", pool.Config["source"])
+	assert.Equal(t, "123", pool.Config["size"])
+	volumes, err := cluster.StoragePoolVolumesGet(id, []int{1})
+	require.NoError(t, err)
+	assert.Len(t, volumes, 1)
+	assert.Equal(t, "/foo/bar", volumes[0].Config["source"])
 }
 
 // Return a sql.Tx against a memory database populated with pre-clustering
@@ -74,6 +88,11 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
 		"INSERT INTO networks VALUES(1, 'lxcbr0', 'LXD bridge')",
 		"INSERT INTO networks_config VALUES(1, 1, 'ipv4.nat', 'true')",
+		"INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')",
+		"INSERT INTO storage_pools_config VALUES(1, 1, 'source', '/foo/bar')",
+		"INSERT INTO storage_pools_config VALUES(2, 1, 'size', '123')",
+		"INSERT INTO storage_volumes VALUES (1, 'dev', 1, 1, '')",
+		"INSERT INTO storage_volumes_config VALUES(1, 1, 'source', '/foo/bar')",
 	}
 	for _, stmt := range stmts {
 		_, err := tx.Exec(stmt)
diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index 7a0511f92..c8105de7f 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -146,38 +146,6 @@ CREATE TABLE raft_nodes (
     address TEXT NOT NULL,
     UNIQUE (address)
 );
-CREATE TABLE storage_pools (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    driver VARCHAR(255) NOT NULL,
-    description TEXT,
-    UNIQUE (name)
-);
-CREATE TABLE storage_pools_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    storage_pool_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    UNIQUE (storage_pool_id, key),
-    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
-);
-CREATE TABLE storage_volumes (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    storage_pool_id INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    description TEXT,
-    UNIQUE (storage_pool_id, name, type),
-    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE
-);
-CREATE TABLE storage_volumes_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    storage_volume_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    UNIQUE (storage_volume_id, key),
-    FOREIGN KEY (storage_volume_id) REFERENCES storage_volumes (id) ON DELETE CASCADE
-);
 
 INSERT INTO schema (version, updated_at) VALUES (37, strftime("%s"))
 `
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 0866bd63d..1e20ca8ba 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -120,6 +120,10 @@ CREATE TABLE raft_nodes (
 DELETE FROM config WHERE NOT key='core.https_address';
 DROP TABLE networks_config;
 DROP TABLE networks;
+DROP TABLE storage_volumes_config;
+DROP TABLE storage_volumes;
+DROP TABLE storage_pools_config;
+DROP TABLE storage_pools;
 `
 	_, err := tx.Exec(stmts)
 	return err
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 0f2036633..99392f90c 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -10,13 +10,13 @@ import (
 )
 
 // Get all storage pools.
-func (n *Node) StoragePools() ([]string, error) {
+func (c *Cluster) StoragePools() ([]string, error) {
 	var name string
 	query := "SELECT name FROM storage_pools"
 	inargs := []interface{}{}
 	outargs := []interface{}{name}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return []string{}, err
 	}
@@ -34,13 +34,13 @@ func (n *Node) StoragePools() ([]string, error) {
 }
 
 // Get the distinct drivers of all storage pools.
-func (n *Node) StoragePoolsGetDrivers() ([]string, error) {
+func (c *Cluster) StoragePoolsGetDrivers() ([]string, error) {
 	var poolDriver string
 	query := "SELECT DISTINCT driver FROM storage_pools"
 	inargs := []interface{}{}
 	outargs := []interface{}{poolDriver}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return []string{}, err
 	}
@@ -58,13 +58,13 @@ func (n *Node) StoragePoolsGetDrivers() ([]string, error) {
 }
 
 // Get id of a single storage pool.
-func (n *Node) StoragePoolGetID(poolName string) (int64, error) {
+func (c *Cluster) StoragePoolGetID(poolName string) (int64, error) {
 	poolID := int64(-1)
 	query := "SELECT id FROM storage_pools WHERE name=?"
 	inargs := []interface{}{poolName}
 	outargs := []interface{}{&poolID}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return -1, NoSuchObjectError
@@ -75,7 +75,7 @@ func (n *Node) StoragePoolGetID(poolName string) (int64, error) {
 }
 
 // Get a single storage pool.
-func (n *Node) StoragePoolGet(poolName string) (int64, *api.StoragePool, error) {
+func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, error) {
 	var poolDriver string
 	poolID := int64(-1)
 	description := sql.NullString{}
@@ -84,7 +84,7 @@ func (n *Node) StoragePoolGet(poolName string) (int64, *api.StoragePool, error)
 	inargs := []interface{}{poolName}
 	outargs := []interface{}{&poolID, &poolDriver, &description}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return -1, nil, NoSuchObjectError
@@ -92,7 +92,7 @@ func (n *Node) StoragePoolGet(poolName string) (int64, *api.StoragePool, error)
 		return -1, nil, err
 	}
 
-	config, err := n.StoragePoolConfigGet(poolID)
+	config, err := c.StoragePoolConfigGet(poolID)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -108,13 +108,13 @@ func (n *Node) StoragePoolGet(poolName string) (int64, *api.StoragePool, error)
 }
 
 // Get config of a storage pool.
-func (n *Node) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
+func (c *Cluster) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
 	var key, value string
-	query := "SELECT key, value FROM storage_pools_config WHERE storage_pool_id=?"
-	inargs := []interface{}{poolID}
+	query := "SELECT key, value FROM storage_pools_config WHERE storage_pool_id=? AND (node_id=? OR node_id IS NULL)"
+	inargs := []interface{}{poolID, c.id}
 	outargs := []interface{}{key, value}
 
-	results, err := queryScan(n.db, query, inargs, outargs)
+	results, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return nil, err
 	}
@@ -132,8 +132,8 @@ func (n *Node) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
 }
 
 // Create new storage pool.
-func (n *Node) StoragePoolCreate(poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
-	tx, err := begin(n.db)
+func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
+	tx, err := begin(c.db)
 	if err != nil {
 		return -1, err
 	}
@@ -150,7 +150,7 @@ func (n *Node) StoragePoolCreate(poolName string, poolDescription string, poolDr
 		return -1, err
 	}
 
-	err = StoragePoolConfigAdd(tx, id, poolConfig)
+	err = StoragePoolConfigAdd(tx, id, c.id, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -165,8 +165,8 @@ func (n *Node) StoragePoolCreate(poolName string, poolDescription string, poolDr
 }
 
 // Add new storage pool config.
-func StoragePoolConfigAdd(tx *sql.Tx, poolID int64, poolConfig map[string]string) error {
-	str := "INSERT INTO storage_pools_config (storage_pool_id, key, value) VALUES(?, ?, ?)"
+func StoragePoolConfigAdd(tx *sql.Tx, poolID, nodeID int64, poolConfig map[string]string) error {
+	str := "INSERT INTO storage_pools_config (storage_pool_id, node_id, key, value) VALUES(?, ?, ?, ?)"
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
 	if err != nil {
@@ -177,8 +177,14 @@ func StoragePoolConfigAdd(tx *sql.Tx, poolID int64, poolConfig map[string]string
 		if v == "" {
 			continue
 		}
+		var nodeIDValue interface{}
+		if k != "source" {
+			nodeIDValue = nil
+		} else {
+			nodeIDValue = nodeID
+		}
 
-		_, err = stmt.Exec(poolID, k, v)
+		_, err = stmt.Exec(poolID, nodeIDValue, k, v)
 		if err != nil {
 			return err
 		}
@@ -188,13 +194,13 @@ func StoragePoolConfigAdd(tx *sql.Tx, poolID int64, poolConfig map[string]string
 }
 
 // Update storage pool.
-func (n *Node) StoragePoolUpdate(poolName, description string, poolConfig map[string]string) error {
-	poolID, _, err := n.StoragePoolGet(poolName)
+func (c *Cluster) StoragePoolUpdate(poolName, description string, poolConfig map[string]string) error {
+	poolID, _, err := c.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -205,13 +211,13 @@ func (n *Node) StoragePoolUpdate(poolName, description string, poolConfig map[st
 		return err
 	}
 
-	err = StoragePoolConfigClear(tx, poolID)
+	err = StoragePoolConfigClear(tx, poolID, c.id)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = StoragePoolConfigAdd(tx, poolID, poolConfig)
+	err = StoragePoolConfigAdd(tx, poolID, c.id, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -227,8 +233,8 @@ func StoragePoolUpdateDescription(tx *sql.Tx, id int64, description string) erro
 }
 
 // Delete storage pool config.
-func StoragePoolConfigClear(tx *sql.Tx, poolID int64) error {
-	_, err := tx.Exec("DELETE FROM storage_pools_config WHERE storage_pool_id=?", poolID)
+func StoragePoolConfigClear(tx *sql.Tx, poolID, nodeID int64) error {
+	_, err := tx.Exec("DELETE FROM storage_pools_config WHERE storage_pool_id=? AND (node_id=? OR node_id IS NULL)", poolID, nodeID)
 	if err != nil {
 		return err
 	}
@@ -237,13 +243,13 @@ func StoragePoolConfigClear(tx *sql.Tx, poolID int64) error {
 }
 
 // Delete storage pool.
-func (n *Node) StoragePoolDelete(poolName string) (*api.StoragePool, error) {
-	poolID, pool, err := n.StoragePoolGet(poolName)
+func (c *Cluster) StoragePoolDelete(poolName string) (*api.StoragePool, error) {
+	poolID, pool, err := c.StoragePoolGet(poolName)
 	if err != nil {
 		return nil, err
 	}
 
-	_, err = exec(n.db, "DELETE FROM storage_pools WHERE id=?", poolID)
+	_, err = exec(c.db, "DELETE FROM storage_pools WHERE id=?", poolID)
 	if err != nil {
 		return nil, err
 	}
@@ -252,13 +258,13 @@ func (n *Node) StoragePoolDelete(poolName string) (*api.StoragePool, error) {
 }
 
 // Get the names of all storage volumes attached to a given storage pool.
-func (n *Node) StoragePoolVolumesGetNames(poolID int64) (int, error) {
+func (c *Cluster) StoragePoolVolumesGetNames(poolID int64) (int, error) {
 	var volumeName string
 	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=?"
 	inargs := []interface{}{poolID}
 	outargs := []interface{}{volumeName}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return -1, err
 	}
@@ -271,17 +277,17 @@ func (n *Node) StoragePoolVolumesGetNames(poolID int64) (int, error) {
 }
 
 // Get all storage volumes attached to a given storage pool.
-func (n *Node) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
+func (c *Cluster) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api.StorageVolume, error) {
 	// Get all storage volumes of all types attached to a given storage
 	// pool.
 	result := []*api.StorageVolume{}
 	for _, volumeType := range volumeTypes {
-		volumeNames, err := n.StoragePoolVolumesGetType(volumeType, poolID)
+		volumeNames, err := c.StoragePoolVolumesGetType(volumeType, poolID)
 		if err != nil && err != sql.ErrNoRows {
 			return nil, err
 		}
 		for _, volumeName := range volumeNames {
-			_, volume, err := n.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+			_, volume, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 			if err != nil {
 				return nil, err
 			}
@@ -298,13 +304,13 @@ func (n *Node) StoragePoolVolumesGet(poolID int64, volumeTypes []int) ([]*api.St
 
 // Get all storage volumes attached to a given storage pool of a given volume
 // type.
-func (n *Node) StoragePoolVolumesGetType(volumeType int, poolID int64) ([]string, error) {
+func (c *Cluster) StoragePoolVolumesGetType(volumeType int, poolID int64) ([]string, error) {
 	var poolName string
 	query := "SELECT name FROM storage_volumes WHERE storage_pool_id=? AND type=?"
 	inargs := []interface{}{poolID, volumeType}
 	outargs := []interface{}{poolName}
 
-	result, err := queryScan(n.db, query, inargs, outargs)
+	result, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return []string{}, err
 	}
@@ -318,18 +324,18 @@ func (n *Node) StoragePoolVolumesGetType(volumeType int, poolID int64) ([]string
 }
 
 // Get a single storage volume attached to a given storage pool of a given type.
-func (n *Node) StoragePoolVolumeGetType(volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
-	volumeID, err := n.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
+func (c *Cluster) StoragePoolVolumeGetType(volumeName string, volumeType int, poolID int64) (int64, *api.StorageVolume, error) {
+	volumeID, err := c.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	volumeConfig, err := n.StorageVolumeConfigGet(volumeID)
+	volumeConfig, err := c.StorageVolumeConfigGet(volumeID)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	volumeDescription, err := n.StorageVolumeDescriptionGet(volumeID)
+	volumeDescription, err := c.StorageVolumeDescriptionGet(volumeID)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -350,24 +356,24 @@ func (n *Node) StoragePoolVolumeGetType(volumeName string, volumeType int, poolI
 }
 
 // Update storage volume attached to a given storage pool.
-func (n *Node) StoragePoolVolumeUpdate(volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string) error {
-	volumeID, _, err := n.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+func (c *Cluster) StoragePoolVolumeUpdate(volumeName string, volumeType int, poolID int64, volumeDescription string, volumeConfig map[string]string) error {
+	volumeID, _, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
 
-	err = StorageVolumeConfigClear(tx, volumeID)
+	err = StorageVolumeConfigClear(tx, volumeID, c.id)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, c.id, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -383,13 +389,13 @@ func (n *Node) StoragePoolVolumeUpdate(volumeName string, volumeType int, poolID
 }
 
 // Delete storage volume attached to a given storage pool.
-func (n *Node) StoragePoolVolumeDelete(volumeName string, volumeType int, poolID int64) error {
-	volumeID, _, err := n.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+func (c *Cluster) StoragePoolVolumeDelete(volumeName string, volumeType int, poolID int64) error {
+	volumeID, _, err := c.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM storage_volumes WHERE id=?", volumeID)
+	_, err = exec(c.db, "DELETE FROM storage_volumes WHERE id=?", volumeID)
 	if err != nil {
 		return err
 	}
@@ -398,13 +404,13 @@ func (n *Node) StoragePoolVolumeDelete(volumeName string, volumeType int, poolID
 }
 
 // Rename storage volume attached to a given storage pool.
-func (n *Node) StoragePoolVolumeRename(oldVolumeName string, newVolumeName string, volumeType int, poolID int64) error {
-	volumeID, _, err := n.StoragePoolVolumeGetType(oldVolumeName, volumeType, poolID)
+func (c *Cluster) StoragePoolVolumeRename(oldVolumeName string, newVolumeName string, volumeType int, poolID int64) error {
+	volumeID, _, err := c.StoragePoolVolumeGetType(oldVolumeName, volumeType, poolID)
 	if err != nil {
 		return err
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -419,8 +425,8 @@ func (n *Node) StoragePoolVolumeRename(oldVolumeName string, newVolumeName strin
 }
 
 // Create new storage volume attached to a given storage pool.
-func (n *Node) StoragePoolVolumeCreate(volumeName, volumeDescription string, volumeType int, poolID int64, volumeConfig map[string]string) (int64, error) {
-	tx, err := begin(n.db)
+func (c *Cluster) StoragePoolVolumeCreate(volumeName, volumeDescription string, volumeType int, poolID int64, volumeConfig map[string]string) (int64, error) {
+	tx, err := begin(c.db)
 	if err != nil {
 		return -1, err
 	}
@@ -438,7 +444,7 @@ func (n *Node) StoragePoolVolumeCreate(volumeName, volumeDescription string, vol
 		return -1, err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, c.id, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -454,7 +460,7 @@ func (n *Node) StoragePoolVolumeCreate(volumeName, volumeDescription string, vol
 
 // Get ID of a storage volume on a given storage pool of a given storage volume
 // type.
-func (n *Node) StoragePoolVolumeGetTypeID(volumeName string, volumeType int, poolID int64) (int64, error) {
+func (c *Cluster) StoragePoolVolumeGetTypeID(volumeName string, volumeType int, poolID int64) (int64, error) {
 	volumeID := int64(-1)
 	query := `SELECT storage_volumes.id
 FROM storage_volumes
@@ -465,7 +471,7 @@ AND storage_volumes.name=? AND storage_volumes.type=?`
 	inargs := []interface{}{poolID, volumeName, volumeType}
 	outargs := []interface{}{&volumeID}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return -1, NoSuchObjectError
 	}
@@ -505,7 +511,7 @@ func StoragePoolVolumeTypeToName(volumeType int) (string, error) {
 	return "", fmt.Errorf("invalid storage volume type")
 }
 
-func (n *Node) StoragePoolInsertZfsDriver() error {
-	_, err := exec(n.db, "UPDATE storage_pools SET driver='zfs', description='' WHERE driver=''")
+func (c *Cluster) StoragePoolInsertZfsDriver() error {
+	_, err := exec(c.db, "UPDATE storage_pools SET driver='zfs', description='' WHERE driver=''")
 	return err
 }
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index 49da9d45a..95d164234 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -2,18 +2,20 @@ package db
 
 import (
 	"database/sql"
+	"fmt"
 
+	"github.com/lxc/lxd/lxd/db/query"
 	_ "github.com/mattn/go-sqlite3"
 )
 
 // Get config of a storage volume.
-func (n *Node) StorageVolumeConfigGet(volumeID int64) (map[string]string, error) {
+func (c *Cluster) StorageVolumeConfigGet(volumeID int64) (map[string]string, error) {
 	var key, value string
-	query := "SELECT key, value FROM storage_volumes_config WHERE storage_volume_id=?"
-	inargs := []interface{}{volumeID}
+	query := "SELECT key, value FROM storage_volumes_config WHERE storage_volume_id=? AND node_id=?"
+	inargs := []interface{}{volumeID, c.id}
 	outargs := []interface{}{key, value}
 
-	results, err := queryScan(n.db, query, inargs, outargs)
+	results, err := queryScan(c.db, query, inargs, outargs)
 	if err != nil {
 		return nil, err
 	}
@@ -31,13 +33,13 @@ func (n *Node) StorageVolumeConfigGet(volumeID int64) (map[string]string, error)
 }
 
 // Get the description of a storage volume.
-func (n *Node) StorageVolumeDescriptionGet(volumeID int64) (string, error) {
+func (c *Cluster) StorageVolumeDescriptionGet(volumeID int64) (string, error) {
 	description := sql.NullString{}
 	query := "SELECT description FROM storage_volumes WHERE id=?"
 	inargs := []interface{}{volumeID}
 	outargs := []interface{}{&description}
 
-	err := dbQueryRowScan(n.db, query, inargs, outargs)
+	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return "", NoSuchObjectError
@@ -54,8 +56,8 @@ func StorageVolumeDescriptionUpdate(tx *sql.Tx, volumeID int64, description stri
 }
 
 // Add new storage volume config into database.
-func StorageVolumeConfigAdd(tx *sql.Tx, volumeID int64, volumeConfig map[string]string) error {
-	str := "INSERT INTO storage_volumes_config (storage_volume_id, key, value) VALUES(?, ?, ?)"
+func StorageVolumeConfigAdd(tx *sql.Tx, volumeID, nodeID int64, volumeConfig map[string]string) error {
+	str := "INSERT INTO storage_volumes_config (storage_volume_id, node_id, key, value) VALUES(?, ?, ?, ?)"
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
 	if err != nil {
@@ -67,7 +69,7 @@ func StorageVolumeConfigAdd(tx *sql.Tx, volumeID int64, volumeConfig map[string]
 			continue
 		}
 
-		_, err = stmt.Exec(volumeID, k, v)
+		_, err = stmt.Exec(volumeID, nodeID, k, v)
 		if err != nil {
 			return err
 		}
@@ -77,8 +79,8 @@ func StorageVolumeConfigAdd(tx *sql.Tx, volumeID int64, volumeConfig map[string]
 }
 
 // Delete storage volume config.
-func StorageVolumeConfigClear(tx *sql.Tx, volumeID int64) error {
-	_, err := tx.Exec("DELETE FROM storage_volumes_config WHERE storage_volume_id=?", volumeID)
+func StorageVolumeConfigClear(tx *sql.Tx, volumeID, nodeID int64) error {
+	_, err := tx.Exec("DELETE FROM storage_volumes_config WHERE storage_volume_id=? AND (node_id=? OR node_id IS NULL)", volumeID, nodeID)
 	if err != nil {
 		return err
 	}
@@ -86,18 +88,25 @@ func StorageVolumeConfigClear(tx *sql.Tx, volumeID int64) error {
 	return nil
 }
 
-func (n *Node) StorageVolumeCleanupImages() error {
-	_, err := exec(n.db, "DELETE FROM storage_volumes WHERE type=? AND name NOT IN (SELECT fingerprint FROM images);", StoragePoolVolumeTypeImage)
+func (c *Cluster) StorageVolumeCleanupImages(fingerprints []string) error {
+	stmt := fmt.Sprintf(
+		"DELETE FROM storage_volumes WHERE type=? AND name NOT IN %s",
+		query.Params(len(fingerprints)))
+	args := []interface{}{StoragePoolVolumeTypeImage}
+	for _, fingerprint := range fingerprints {
+		args = append(args, fingerprint)
+	}
+	_, err := exec(c.db, stmt, args...)
 	return err
 }
 
-func (n *Node) StorageVolumeMoveToLVMThinPoolNameKey() error {
-	_, err := exec(n.db, "UPDATE storage_pools_config SET key='lvm.thinpool_name' WHERE key='volume.lvm.thinpool_name';")
+func (c *Cluster) StorageVolumeMoveToLVMThinPoolNameKey() error {
+	_, err := exec(c.db, "UPDATE storage_pools_config SET key='lvm.thinpool_name' WHERE key='volume.lvm.thinpool_name';")
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM storage_volumes_config WHERE key='lvm.thinpool_name';")
+	_, err = exec(c.db, "DELETE FROM storage_volumes_config WHERE key='lvm.thinpool_name';")
 	if err != nil {
 		return err
 	}
diff --git a/lxd/images.go b/lxd/images.go
index 8e6d1078d..cd7916969 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -931,7 +931,7 @@ func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 
 	// Get the IDs of all storage pools on which a storage volume
 	// for the requested image currently exists.
-	poolIDs, err := d.db.ImageGetPools(fingerprint)
+	poolIDs, err := d.cluster.ImageGetPools(fingerprint)
 	if err != nil {
 		logger.Error("Error getting image pools", log.Ctx{"err": err, "fp": fingerprint})
 		return err
@@ -1113,7 +1113,7 @@ func pruneExpiredImages(ctx context.Context, d *Daemon) {
 
 		// Get the IDs of all storage pools on which a storage volume
 		// for the requested image currently exists.
-		poolIDs, err := d.db.ImageGetPools(fp)
+		poolIDs, err := d.cluster.ImageGetPools(fp)
 		if err != nil {
 			continue
 		}
@@ -1191,7 +1191,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 			return err
 		}
 
-		poolIDs, err := d.db.ImageGetPools(imgInfo.Fingerprint)
+		poolIDs, err := d.cluster.ImageGetPools(imgInfo.Fingerprint)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/main_test.go b/lxd/main_test.go
index 2c1acfd54..4bed14446 100644
--- a/lxd/main_test.go
+++ b/lxd/main_test.go
@@ -70,7 +70,7 @@ func (suite *lxdTestSuite) SetupTest() {
 	mockStorage, _ := storageTypeToString(storageTypeMock)
 	// Create the database entry for the storage pool.
 	poolDescription := fmt.Sprintf("%s storage pool", lxdTestSuiteDefaultStoragePool)
-	_, err = dbStoragePoolCreateAndUpdateCache(suite.d.db, lxdTestSuiteDefaultStoragePool, poolDescription, mockStorage, poolConfig)
+	_, err = dbStoragePoolCreateAndUpdateCache(suite.d.cluster, lxdTestSuiteDefaultStoragePool, poolDescription, mockStorage, poolConfig)
 	if err != nil {
 		suite.T().Fatalf("failed to create default storage pool: %v", err)
 	}
diff --git a/lxd/patches.go b/lxd/patches.go
index 8d3fcf2f6..f8dbb2dd9 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -319,7 +319,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 	}
 
 	poolID := int64(-1)
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err == nil { // Already exist valid storage pools.
 		// Check if the storage pool already has a db entry.
 		if shared.StringInSlice(defaultPoolName, pools) {
@@ -328,7 +328,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 
 		// Get the pool ID as we need it for storage volume creation.
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, pool, err := d.db.StoragePoolGet(defaultPoolName)
+		tmp, pool, err := d.cluster.StoragePoolGet(defaultPoolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s.", err)
 			return err
@@ -341,12 +341,12 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 		if pool.Config == nil {
 			pool.Config = poolConfig
 		}
-		err = d.db.StoragePoolUpdate(defaultPoolName, "", pool.Config)
+		err = d.cluster.StoragePoolUpdate(defaultPoolName, "", pool.Config)
 		if err != nil {
 			return err
 		}
 	} else if err == db.NoSuchObjectError { // Likely a pristine upgrade.
-		tmp, err := dbStoragePoolCreateAndUpdateCache(d.db, defaultPoolName, "", defaultStorageTypeName, poolConfig)
+		tmp, err := dbStoragePoolCreateAndUpdateCache(d.cluster, defaultPoolName, "", defaultStorageTypeName, poolConfig)
 		if err != nil {
 			return err
 		}
@@ -378,7 +378,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 	}
 
 	// Get storage pool from the db after having updated it above.
-	_, defaultPool, err := d.db.StoragePoolGet(defaultPoolName)
+	_, defaultPool, err := d.cluster.StoragePoolGet(defaultPoolName)
 	if err != nil {
 		return err
 	}
@@ -392,16 +392,16 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
-			err := d.db.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for container \"%s\".", ct)
 				return err
@@ -480,16 +480,16 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 				return err
 			}
 
-			_, err = d.db.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+			_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 			if err == nil {
 				logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
-				err := d.db.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+				err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
 				if err != nil {
 					return err
 				}
 			} else if err == db.NoSuchObjectError {
 				// Insert storage volumes for containers into the database.
-				_, err := d.db.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
+				_, err := d.cluster.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
 				if err != nil {
 					logger.Errorf("Could not insert a storage volume for snapshot \"%s\".", cs)
 					return err
@@ -561,16 +561,16 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
-			err := d.db.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for image \"%s\".", img)
 				return err
@@ -616,7 +616,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 	}
 
 	poolID := int64(-1)
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err == nil { // Already exist valid storage pools.
 		// Check if the storage pool already has a db entry.
 		if shared.StringInSlice(defaultPoolName, pools) {
@@ -625,7 +625,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 
 		// Get the pool ID as we need it for storage volume creation.
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, pool, err := d.db.StoragePoolGet(defaultPoolName)
+		tmp, pool, err := d.cluster.StoragePoolGet(defaultPoolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s.", err)
 			return err
@@ -638,12 +638,12 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 		if pool.Config == nil {
 			pool.Config = poolConfig
 		}
-		err = d.db.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
 	} else if err == db.NoSuchObjectError { // Likely a pristine upgrade.
-		tmp, err := dbStoragePoolCreateAndUpdateCache(d.db, defaultPoolName, "", defaultStorageTypeName, poolConfig)
+		tmp, err := dbStoragePoolCreateAndUpdateCache(d.cluster, defaultPoolName, "", defaultStorageTypeName, poolConfig)
 		if err != nil {
 			return err
 		}
@@ -664,7 +664,7 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 	}
 
 	// Get storage pool from the db after having updated it above.
-	_, defaultPool, err := d.db.StoragePoolGet(defaultPoolName)
+	_, defaultPool, err := d.cluster.StoragePoolGet(defaultPoolName)
 	if err != nil {
 		return err
 	}
@@ -679,16 +679,16 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
-			err := d.db.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for container \"%s\".", ct)
 				return err
@@ -796,16 +796,16 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
-			err := d.db.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for snapshot \"%s\".", cs)
 				return err
@@ -826,16 +826,16 @@ func upgradeFromStorageTypeDir(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
-			err := d.db.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for image \"%s\".", img)
 				return err
@@ -915,7 +915,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 	// are already configured. If so, we can assume that a partial upgrade
 	// has been performed and can skip the next steps.
 	poolID := int64(-1)
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err == nil { // Already exist valid storage pools.
 		// Check if the storage pool already has a db entry.
 		if shared.StringInSlice(defaultPoolName, pools) {
@@ -924,7 +924,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 
 		// Get the pool ID as we need it for storage volume creation.
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, pool, err := d.db.StoragePoolGet(defaultPoolName)
+		tmp, pool, err := d.cluster.StoragePoolGet(defaultPoolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s.", err)
 			return err
@@ -937,12 +937,12 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 		if pool.Config == nil {
 			pool.Config = poolConfig
 		}
-		err = d.db.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(defaultPoolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
 	} else if err == db.NoSuchObjectError { // Likely a pristine upgrade.
-		tmp, err := dbStoragePoolCreateAndUpdateCache(d.db, defaultPoolName, "", defaultStorageTypeName, poolConfig)
+		tmp, err := dbStoragePoolCreateAndUpdateCache(d.cluster, defaultPoolName, "", defaultStorageTypeName, poolConfig)
 		if err != nil {
 			return err
 		}
@@ -973,7 +973,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 	}
 
 	// Get storage pool from the db after having updated it above.
-	_, defaultPool, err := d.db.StoragePoolGet(defaultPoolName)
+	_, defaultPool, err := d.cluster.StoragePoolGet(defaultPoolName)
 	if err != nil {
 		return err
 	}
@@ -988,16 +988,16 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
-			err := d.db.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for container \"%s\".", ct)
 				return err
@@ -1143,16 +1143,16 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 				return err
 			}
 
-			_, err = d.db.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+			_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 			if err == nil {
 				logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
-				err := d.db.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+				err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
 				if err != nil {
 					return err
 				}
 			} else if err == db.NoSuchObjectError {
 				// Insert storage volumes for containers into the database.
-				_, err := d.db.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
+				_, err := d.cluster.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
 				if err != nil {
 					logger.Errorf("Could not insert a storage volume for snapshot \"%s\".", cs)
 					return err
@@ -1314,16 +1314,16 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
-			err := d.db.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for image \"%s\".", img)
 				return err
@@ -1375,7 +1375,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 			// This image didn't exist as a logical volume on the
 			// old LXD instance so we need to kick it from the
 			// storage volumes database for this pool.
-			err := d.db.StoragePoolVolumeDelete(img, storagePoolVolumeTypeImage, poolID)
+			err := d.cluster.StoragePoolVolumeDelete(img, storagePoolVolumeTypeImage, poolID)
 			if err != nil {
 				return err
 			}
@@ -1415,7 +1415,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 	// are already configured. If so, we can assume that a partial upgrade
 	// has been performed and can skip the next steps.
 	poolID := int64(-1)
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err == nil { // Already exist valid storage pools.
 		// Check if the storage pool already has a db entry.
 		if shared.StringInSlice(poolName, pools) {
@@ -1424,7 +1424,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 
 		// Get the pool ID as we need it for storage volume creation.
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, pool, err := d.db.StoragePoolGet(poolName)
+		tmp, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s.", err)
 			return err
@@ -1437,7 +1437,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 		if pool.Config == nil {
 			pool.Config = poolConfig
 		}
-		err = d.db.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
@@ -1469,7 +1469,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 		}
 
 		// (Use a tmp variable as Go's scoping is freaking me out.)
-		tmp, err := dbStoragePoolCreateAndUpdateCache(d.db, poolName, "", defaultStorageTypeName, poolConfig)
+		tmp, err := dbStoragePoolCreateAndUpdateCache(d.cluster, poolName, "", defaultStorageTypeName, poolConfig)
 		if err != nil {
 			logger.Warnf("Storage pool already exists in the database. Proceeding...")
 		}
@@ -1480,7 +1480,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 	}
 
 	// Get storage pool from the db after having updated it above.
-	_, defaultPool, err := d.db.StoragePoolGet(poolName)
+	_, defaultPool, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
@@ -1505,16 +1505,16 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(ct, storagePoolVolumeTypeContainer, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the container.")
-			err := d.db.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(ct, storagePoolVolumeTypeContainer, poolID, "", containerPoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(ct, "", storagePoolVolumeTypeContainer, poolID, containerPoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for container \"%s\".", ct)
 				return err
@@ -1591,16 +1591,16 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 				return err
 			}
 
-			_, err = d.db.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
+			_, err = d.cluster.StoragePoolVolumeGetTypeID(cs, storagePoolVolumeTypeContainer, poolID)
 			if err == nil {
 				logger.Warnf("Storage volumes database already contains an entry for the snapshot.")
-				err := d.db.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
+				err := d.cluster.StoragePoolVolumeUpdate(cs, storagePoolVolumeTypeContainer, poolID, "", snapshotPoolVolumeConfig)
 				if err != nil {
 					return err
 				}
 			} else if err == db.NoSuchObjectError {
 				// Insert storage volumes for containers into the database.
-				_, err := d.db.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
+				_, err := d.cluster.StoragePoolVolumeCreate(cs, "", storagePoolVolumeTypeContainer, poolID, snapshotPoolVolumeConfig)
 				if err != nil {
 					logger.Errorf("Could not insert a storage volume for snapshot \"%s\".", cs)
 					return err
@@ -1647,16 +1647,16 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 			return err
 		}
 
-		_, err = d.db.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
+		_, err = d.cluster.StoragePoolVolumeGetTypeID(img, storagePoolVolumeTypeImage, poolID)
 		if err == nil {
 			logger.Warnf("Storage volumes database already contains an entry for the image.")
-			err := d.db.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
+			err := d.cluster.StoragePoolVolumeUpdate(img, storagePoolVolumeTypeImage, poolID, "", imagePoolVolumeConfig)
 			if err != nil {
 				return err
 			}
 		} else if err == db.NoSuchObjectError {
 			// Insert storage volumes for containers into the database.
-			_, err := d.db.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
+			_, err := d.cluster.StoragePoolVolumeCreate(img, "", storagePoolVolumeTypeImage, poolID, imagePoolVolumeConfig)
 			if err != nil {
 				logger.Errorf("Could not insert a storage volume for image \"%s\".", img)
 				return err
@@ -1861,7 +1861,7 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
 }
 
 func patchStorageApiV1(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -1898,15 +1898,19 @@ func patchStorageApiV1(name string, d *Daemon) error {
 }
 
 func patchStorageApiDirCleanup(name string, d *Daemon) error {
-	return d.db.StorageVolumeCleanupImages()
+	fingerprints, err := d.db.ImagesGet(false)
+	if err != nil {
+		return err
+	}
+	return d.cluster.StorageVolumeCleanupImages(fingerprints)
 }
 
 func patchStorageApiLvmKeys(name string, d *Daemon) error {
-	return d.db.StorageVolumeMoveToLVMThinPoolNameKey()
+	return d.cluster.StorageVolumeMoveToLVMThinPoolNameKey()
 }
 
 func patchStorageApiKeys(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -1918,7 +1922,7 @@ func patchStorageApiKeys(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -1951,7 +1955,7 @@ func patchStorageApiKeys(name string, d *Daemon) error {
 		}
 
 		// Update the config in the database.
-		err = d.db.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
@@ -1963,7 +1967,7 @@ func patchStorageApiKeys(name string, d *Daemon) error {
 // In case any of the objects images/containers/snapshots are missing storage
 // volume configuration entries, let's add the defaults.
 func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			return nil
@@ -1973,7 +1977,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		poolID, pool, err := d.db.StoragePoolGet(poolName)
+		poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2039,13 +2043,13 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
 		}
 
 		// Update the storage pool config.
-		err = d.db.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
 
 		// Get all storage volumes on the storage pool.
-		volumes, err := d.db.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+		volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				continue
@@ -2100,7 +2104,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
 			// exist in the db, so it's safe to ignore the error.
 			volumeType, _ := storagePoolVolumeTypeNameToType(volume.Type)
 			// Update the volume config.
-			err = d.db.StoragePoolVolumeUpdate(volume.Name, volumeType, poolID, volume.Description, volume.Config)
+			err = d.cluster.StoragePoolVolumeUpdate(volume.Name, volumeType, poolID, volume.Description, volume.Config)
 			if err != nil {
 				return err
 			}
@@ -2111,7 +2115,7 @@ func patchStorageApiUpdateStorageConfigs(name string, d *Daemon) error {
 }
 
 func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			return nil
@@ -2121,7 +2125,7 @@ func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2156,7 +2160,7 @@ func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
 		pool.Config["source"] = getStoragePoolMountPoint(poolName)
 
 		// Update the storage pool config.
-		err = d.db.StoragePoolUpdate(poolName, pool.Description, pool.Config)
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description, pool.Config)
 		if err != nil {
 			return err
 		}
@@ -2168,7 +2172,7 @@ func patchStorageApiLxdOnBtrfs(name string, d *Daemon) error {
 }
 
 func patchStorageApiDetectLVSize(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			return nil
@@ -2178,7 +2182,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		poolID, pool, err := d.db.StoragePoolGet(poolName)
+		poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2201,7 +2205,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 		}
 
 		// Get all storage volumes on the storage pool.
-		volumes, err := d.db.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+		volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				continue
@@ -2248,7 +2252,7 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 			// exist in the db, so it's safe to ignore the error.
 			volumeType, _ := storagePoolVolumeTypeNameToType(volume.Type)
 			// Update the volume config.
-			err = d.db.StoragePoolVolumeUpdate(volume.Name, volumeType, poolID, volume.Description, volume.Config)
+			err = d.cluster.StoragePoolVolumeUpdate(volume.Name, volumeType, poolID, volume.Description, volume.Config)
 			if err != nil {
 				return err
 			}
@@ -2259,11 +2263,11 @@ func patchStorageApiDetectLVSize(name string, d *Daemon) error {
 }
 
 func patchStorageApiInsertZfsDriver(name string, d *Daemon) error {
-	return d.db.StoragePoolInsertZfsDriver()
+	return d.cluster.StoragePoolInsertZfsDriver()
 }
 
 func patchStorageZFSnoauto(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			return nil
@@ -2273,7 +2277,7 @@ func patchStorageZFSnoauto(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2326,7 +2330,7 @@ func patchStorageZFSnoauto(name string, d *Daemon) error {
 }
 
 func patchStorageZFSVolumeSize(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -2338,7 +2342,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		poolID, pool, err := d.db.StoragePoolGet(poolName)
+		poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2350,7 +2354,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
 		}
 
 		// Get all storage volumes on the storage pool.
-		volumes, err := d.db.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+		volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
 		if err != nil {
 			if err == db.NoSuchObjectError {
 				continue
@@ -2378,7 +2382,7 @@ func patchStorageZFSVolumeSize(name string, d *Daemon) error {
 			// exist in the db, so it's safe to ignore the error.
 			volumeType, _ := storagePoolVolumeTypeNameToType(volume.Type)
 			// Update the volume config.
-			err = d.db.StoragePoolVolumeUpdate(volume.Name,
+			err = d.cluster.StoragePoolVolumeUpdate(volume.Name,
 				volumeType, poolID, volume.Description,
 				volume.Config)
 			if err != nil {
@@ -2412,7 +2416,7 @@ func patchNetworkDnsmasqHosts(name string, d *Daemon) error {
 }
 
 func patchStorageApiDirBindMount(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -2424,7 +2428,7 @@ func patchStorageApiDirBindMount(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2498,7 +2502,7 @@ func patchFixUploadedAt(name string, d *Daemon) error {
 }
 
 func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err == db.NoSuchObjectError {
 		// No pool was configured in the previous update. So we're on a
 		// pristine LXD instance.
@@ -2510,7 +2514,7 @@ func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
 	}
 
 	for _, poolName := range pools {
-		_, pool, err := d.db.StoragePoolGet(poolName)
+		_, pool, err := d.cluster.StoragePoolGet(poolName)
 		if err != nil {
 			logger.Errorf("Failed to query database: %s", err)
 			return err
@@ -2527,7 +2531,7 @@ func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
 		}
 
 		// Update the config in the database.
-		err = d.db.StoragePoolUpdate(poolName, pool.Description,
+		err = d.cluster.StoragePoolUpdate(poolName, pool.Description,
 			pool.Config)
 		if err != nil {
 			return err
diff --git a/lxd/profiles.go b/lxd/profiles.go
index e92a08034..6bafb47f0 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -84,7 +84,7 @@ func profilesPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
-	err = containerValidDevices(d.db, req.Devices, true, false)
+	err = containerValidDevices(d.cluster, req.Devices, true, false)
 	if err != nil {
 		return BadRequest(err)
 	}
diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index 2a36f24d0..d43e17bc2 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -15,7 +15,7 @@ func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req
 		return BadRequest(err)
 	}
 
-	err = containerValidDevices(d.db, req.Devices, true, false)
+	err = containerValidDevices(d.cluster, req.Devices, true, false)
 	if err != nil {
 		return BadRequest(err)
 	}
diff --git a/lxd/storage.go b/lxd/storage.go
index 582bd6403..8ace1596a 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -284,7 +284,7 @@ func storageCoreInit(driver string) (storage, error) {
 
 func storageInit(s *state.State, poolName string, volumeName string, volumeType int) (storage, error) {
 	// Load the storage pool.
-	poolID, pool, err := s.Node.StoragePoolGet(poolName)
+	poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return nil, err
 	}
@@ -299,7 +299,7 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 	// Load the storage volume.
 	volume := &api.StorageVolume{}
 	if volumeName != "" && volumeType >= 0 {
-		_, volume, err = s.Node.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+		_, volume, err = s.Cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 		if err != nil {
 			return nil, err
 		}
@@ -317,7 +317,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		btrfs.pool = pool
 		btrfs.volume = volume
 		btrfs.s = s
-		btrfs.db = s.Node
 		err = btrfs.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -329,7 +328,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		dir.pool = pool
 		dir.volume = volume
 		dir.s = s
-		dir.db = s.Node
 		err = dir.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -341,7 +339,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		ceph.pool = pool
 		ceph.volume = volume
 		ceph.s = s
-		ceph.db = s.Node
 		err = ceph.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -353,7 +350,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		lvm.pool = pool
 		lvm.volume = volume
 		lvm.s = s
-		lvm.db = s.Node
 		err = lvm.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -365,7 +361,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		mock.pool = pool
 		mock.volume = volume
 		mock.s = s
-		mock.db = s.Node
 		err = mock.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -377,7 +372,6 @@ func storageInit(s *state.State, poolName string, volumeName string, volumeType
 		zfs.pool = pool
 		zfs.volume = volume
 		zfs.s = s
-		zfs.db = s.Node
 		err = zfs.StoragePoolInit()
 		if err != nil {
 			return nil, err
@@ -518,11 +512,11 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
 
 	st.SetStoragePoolVolumeWritable(&poolVolumePut)
 
-	poolID, err := s.Node.StoragePoolGetID(poolName)
+	poolID, err := s.Cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return nil, err
 	}
-	err = s.Node.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
+	err = s.Cluster.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
 	if err != nil {
 		return nil, err
 	}
@@ -545,7 +539,7 @@ func storagePoolVolumeContainerCreateInit(s *state.State, poolName string, conta
 
 func storagePoolVolumeContainerLoadInit(s *state.State, containerName string) (storage, error) {
 	// Get the storage pool of a given container.
-	poolName, err := s.Node.ContainerPool(containerName)
+	poolName, err := s.Cluster.ContainerPool(containerName)
 	if err != nil {
 		return nil, err
 	}
@@ -811,7 +805,7 @@ func StorageProgressWriter(op *operation, key string, description string) func(i
 }
 
 func SetupStorageDriver(s *state.State, forceCheck bool) error {
-	pools, err := s.Node.StoragePools()
+	pools, err := s.Cluster.StoragePools()
 	if err != nil {
 		if err == db.NoSuchObjectError {
 			logger.Debugf("No existing storage pools detected.")
@@ -864,7 +858,7 @@ func SetupStorageDriver(s *state.State, forceCheck bool) error {
 	// appropriate. (Should be cheaper then querying the db all the time,
 	// especially if we keep adding more storage drivers.)
 	if !storagePoolDriversCacheInitialized {
-		tmp, err := s.Node.StoragePoolsGetDrivers()
+		tmp, err := s.Cluster.StoragePoolsGetDrivers()
 		if err != nil && err != db.NoSuchObjectError {
 			return nil
 		}
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 20a2dd45b..913d6e62b 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -576,7 +576,7 @@ func (s *storageBtrfs) StoragePoolVolumeDelete() error {
 		}
 	}
 
-	err = s.db.StoragePoolVolumeDelete(
+	err = s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -672,7 +672,7 @@ func (s *storageBtrfs) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed BTRFS storage volume on storage pool "%s" from "%s" to "%s`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 0c6c7f0ba..d5303a936 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -520,7 +520,7 @@ func (s *storageCeph) StoragePoolVolumeDelete() error {
 			s.volume.Name, s.pool.Name)
 	}
 
-	err = s.db.StoragePoolVolumeDelete(
+	err = s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -760,7 +760,7 @@ func (s *storageCeph) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed CEPH storage volume on OSD storage pool "%s" from "%s" to "%s`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
@@ -972,7 +972,7 @@ func (s *storageCeph) ContainerCreateFromImage(container container, fingerprint
 			fingerprint, storagePoolVolumeTypeNameImage, s.UserName)
 
 		if ok {
-			_, volume, err := s.s.Node.StoragePoolVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Cluster.StoragePoolVolumeGetType(fingerprint, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
@@ -2758,7 +2758,7 @@ func (s *storageCeph) StorageEntitySetQuota(volumeType int, size int64, data int
 
 	// Update the database
 	s.volume.Config["size"] = shared.GetByteSizeString(size, 0)
-	err = s.db.StoragePoolVolumeUpdate(
+	err = s.s.Cluster.StoragePoolVolumeUpdate(
 		s.volume.Name,
 		volumeType,
 		s.poolID,
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 78e562fa7..c5aadf74d 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -359,7 +359,7 @@ func (s *storageDir) StoragePoolVolumeDelete() error {
 		return err
 	}
 
-	err = s.db.StoragePoolVolumeDelete(
+	err = s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -433,7 +433,7 @@ func (s *storageDir) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed DIR storage volume on storage pool "%s" from "%s" to "%s`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 95c596b1c..aa648e1f0 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -284,7 +284,7 @@ func (s *storageLvm) StoragePoolCreate() error {
 		}
 
 		// Check that we don't already use this volume group.
-		inUse, user, err := lxdUsesPool(s.db, poolName, s.pool.Driver, "lvm.vg_name")
+		inUse, user, err := lxdUsesPool(s.s.Cluster, poolName, s.pool.Driver, "lvm.vg_name")
 		if err != nil {
 			return err
 		}
@@ -555,7 +555,7 @@ func (s *storageLvm) StoragePoolVolumeDelete() error {
 		}
 	}
 
-	err = s.db.StoragePoolVolumeDelete(
+	err = s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -852,7 +852,7 @@ func (s *storageLvm) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed ZFS storage volume on storage pool "%s" from "%s" to "%s`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
@@ -1794,7 +1794,7 @@ func (s *storageLvm) StorageEntitySetQuota(volumeType int, size int64, data inte
 
 	// Update the database
 	s.volume.Config["size"] = shared.GetByteSizeString(size, 0)
-	err = s.db.StoragePoolVolumeUpdate(
+	err = s.s.Cluster.StoragePoolVolumeUpdate(
 		s.volume.Name,
 		volumeType,
 		s.poolID,
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index 261e457b9..52878c689 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -497,7 +497,7 @@ func (s *storageLvm) containerCreateFromImageThinLv(c container, fp string) erro
 		var imgerr error
 		ok, _ := storageLVExists(imageLvmDevPath)
 		if ok {
-			_, volume, err := s.s.Node.StoragePoolVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
+			_, volume, err := s.s.Cluster.StoragePoolVolumeGetType(fp, db.StoragePoolVolumeTypeImage, s.poolID)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index b08e0125a..bed199461 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -23,7 +23,7 @@ func storagePoolsGet(d *Daemon, r *http.Request) Response {
 		recursion = 0
 	}
 
-	pools, err := d.db.StoragePools()
+	pools, err := d.cluster.StoragePools()
 	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}
@@ -34,13 +34,13 @@ func storagePoolsGet(d *Daemon, r *http.Request) Response {
 		if recursion == 0 {
 			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, pool))
 		} else {
-			plID, pl, err := d.db.StoragePoolGet(pool)
+			plID, pl, err := d.cluster.StoragePoolGet(pool)
 			if err != nil {
 				continue
 			}
 
 			// Get all users of the storage pool.
-			poolUsedBy, err := storagePoolUsedByGet(d.db, plID, pool)
+			poolUsedBy, err := storagePoolUsedByGet(d.State(), plID, pool)
 			if err != nil {
 				return SmartError(err)
 			}
@@ -93,13 +93,13 @@ func storagePoolGet(d *Daemon, r *http.Request) Response {
 	poolName := mux.Vars(r)["name"]
 
 	// Get the existing storage pool.
-	poolID, pool, err := d.db.StoragePoolGet(poolName)
+	poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get all users of the storage pool.
-	poolUsedBy, err := storagePoolUsedByGet(d.db, poolID, poolName)
+	poolUsedBy, err := storagePoolUsedByGet(d.State(), poolID, poolName)
 	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}
@@ -116,7 +116,7 @@ func storagePoolPut(d *Daemon, r *http.Request) Response {
 	poolName := mux.Vars(r)["name"]
 
 	// Get the existing storage pool.
-	_, dbInfo, err := d.db.StoragePoolGet(poolName)
+	_, dbInfo, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -154,7 +154,7 @@ func storagePoolPatch(d *Daemon, r *http.Request) Response {
 	poolName := mux.Vars(r)["name"]
 
 	// Get the existing network
-	_, dbInfo, err := d.db.StoragePoolGet(poolName)
+	_, dbInfo, err := d.cluster.StoragePoolGet(poolName)
 	if dbInfo != nil {
 		return SmartError(err)
 	}
@@ -203,14 +203,14 @@ func storagePoolPatch(d *Daemon, r *http.Request) Response {
 func storagePoolDelete(d *Daemon, r *http.Request) Response {
 	poolName := mux.Vars(r)["name"]
 
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return NotFound
 	}
 
 	// Check if the storage pool has any volumes associated with it, if so
 	// error out.
-	volumeCount, err := d.db.StoragePoolVolumesGetNames(poolID)
+	volumeCount, err := d.cluster.StoragePoolVolumesGetNames(poolID)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -239,7 +239,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
-	err = dbStoragePoolDeleteAndUpdateCache(d.db, poolName)
+	err = dbStoragePoolDeleteAndUpdateCache(d.cluster, poolName)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 849100675..3eb356013 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -62,7 +62,7 @@ func storagePoolUpdate(state *state.State, name, newDescription string, newConfi
 
 	// Update the database if something changed
 	if len(changedConfig) != 0 || newDescription != oldDescription {
-		err = state.Node.StoragePoolUpdate(name, newDescription, newConfig)
+		err = state.Cluster.StoragePoolUpdate(name, newDescription, newConfig)
 		if err != nil {
 			return err
 		}
@@ -80,15 +80,15 @@ func storagePoolUpdate(state *state.State, name, newDescription string, newConfi
 // /1.0/containers/alp1/snapshots/snap0
 // /1.0/images/cedce20b5b236f1071134beba7a5fd2aa923fda49eea4c66454dd559a5d6e906
 // /1.0/profiles/default
-func storagePoolUsedByGet(dbObj *db.Node, poolID int64, poolName string) ([]string, error) {
+func storagePoolUsedByGet(state *state.State, poolID int64, poolName string) ([]string, error) {
 	// Retrieve all non-custom volumes that exist on this storage pool.
-	volumes, err := dbObj.StoragePoolVolumesGet(poolID, []int{storagePoolVolumeTypeContainer, storagePoolVolumeTypeImage, storagePoolVolumeTypeCustom})
+	volumes, err := state.Cluster.StoragePoolVolumesGet(poolID, []int{storagePoolVolumeTypeContainer, storagePoolVolumeTypeImage, storagePoolVolumeTypeCustom})
 	if err != nil && err != db.NoSuchObjectError {
 		return []string{}, err
 	}
 
 	// Retrieve all profiles that exist on this storage pool.
-	profiles, err := profilesUsingPoolGetNames(dbObj, poolName)
+	profiles, err := profilesUsingPoolGetNames(state.Node, poolName)
 
 	if err != nil {
 		return []string{}, err
@@ -164,7 +164,7 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	}
 
 	// Check that the storage pool does not already exist.
-	_, err = s.Node.StoragePoolGetID(poolName)
+	_, err = s.Cluster.StoragePoolGetID(poolName)
 	if err == nil {
 		return fmt.Errorf("The storage pool already exists")
 	}
@@ -187,7 +187,7 @@ func storagePoolDBCreate(s *state.State, poolName, poolDescription string, drive
 	}
 
 	// Create the database entry for the storage pool.
-	_, err = dbStoragePoolCreateAndUpdateCache(s.Node, poolName, poolDescription, driver, config)
+	_, err = dbStoragePoolCreateAndUpdateCache(s.Cluster, poolName, poolDescription, driver, config)
 	if err != nil {
 		return fmt.Errorf("Error inserting %s into database: %s", poolName, err)
 	}
@@ -209,7 +209,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 		if !tryUndo {
 			return
 		}
-		dbStoragePoolDeleteAndUpdateCache(state.Node, poolName)
+		dbStoragePoolDeleteAndUpdateCache(state.Cluster, poolName)
 	}()
 
 	s, err := storagePoolInit(state, poolName)
@@ -238,7 +238,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 	configDiff, _ := storageConfigDiff(config, postCreateConfig)
 	if len(configDiff) > 0 {
 		// Create the database entry for the storage pool.
-		err = state.Node.StoragePoolUpdate(poolName, poolDescription, postCreateConfig)
+		err = state.Cluster.StoragePoolUpdate(poolName, poolDescription, postCreateConfig)
 		if err != nil {
 			return fmt.Errorf("Error inserting %s into database: %s", poolName, err)
 		}
@@ -252,7 +252,7 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 
 // Helper around the low-level DB API, which also updates the driver names
 // cache.
-func dbStoragePoolCreateAndUpdateCache(db *db.Node, poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
+func dbStoragePoolCreateAndUpdateCache(db *db.Cluster, poolName string, poolDescription string, poolDriver string, poolConfig map[string]string) (int64, error) {
 	id, err := db.StoragePoolCreate(poolName, poolDescription, poolDriver, poolConfig)
 	if err != nil {
 		return id, err
@@ -272,7 +272,7 @@ func dbStoragePoolCreateAndUpdateCache(db *db.Node, poolName string, poolDescrip
 
 // Helper around the low-level DB API, which also updates the driver names
 // cache.
-func dbStoragePoolDeleteAndUpdateCache(db *db.Node, poolName string) error {
+func dbStoragePoolDeleteAndUpdateCache(db *db.Cluster, poolName string) error {
 	pool, err := db.StoragePoolDelete(poolName)
 	if err != nil {
 		return err
diff --git a/lxd/storage_shared.go b/lxd/storage_shared.go
index f2e1b692d..caabf3f59 100644
--- a/lxd/storage_shared.go
+++ b/lxd/storage_shared.go
@@ -3,7 +3,6 @@ package main
 import (
 	"fmt"
 
-	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -15,8 +14,7 @@ type storageShared struct {
 	sTypeName    string
 	sTypeVersion string
 
-	db *db.Node
-	s  *state.State
+	s *state.State
 
 	poolID int64
 	pool   *api.StoragePool
@@ -109,7 +107,7 @@ func (s *storageShared) createImageDbPoolVolume(fingerprint string) error {
 	}
 
 	// Create a db entry for the storage volume of the image.
-	_, err = s.db.StoragePoolVolumeCreate(fingerprint, "", storagePoolVolumeTypeImage, s.poolID, volumeConfig)
+	_, err = s.s.Cluster.StoragePoolVolumeCreate(fingerprint, "", storagePoolVolumeTypeImage, s.poolID, volumeConfig)
 	if err != nil {
 		// Try to delete the db entry on error.
 		s.deleteImageDbPoolVolume(fingerprint)
@@ -120,7 +118,7 @@ func (s *storageShared) createImageDbPoolVolume(fingerprint string) error {
 }
 
 func (s *storageShared) deleteImageDbPoolVolume(fingerprint string) error {
-	err := s.db.StoragePoolVolumeDelete(fingerprint, storagePoolVolumeTypeImage, s.poolID)
+	err := s.s.Cluster.StoragePoolVolumeDelete(fingerprint, storagePoolVolumeTypeImage, s.poolID)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/storage_utils.go b/lxd/storage_utils.go
index 90019575d..002adcc28 100644
--- a/lxd/storage_utils.go
+++ b/lxd/storage_utils.go
@@ -164,7 +164,7 @@ const imagesDirMode os.FileMode = 0700
 const snapshotsDirMode os.FileMode = 0700
 
 // Detect whether LXD already uses the given storage pool.
-func lxdUsesPool(dbObj *db.Node, onDiskPoolName string, driver string, onDiskProperty string) (bool, string, error) {
+func lxdUsesPool(dbObj *db.Cluster, onDiskPoolName string, driver string, onDiskProperty string) (bool, string, error) {
 	pools, err := dbObj.StoragePools()
 	if err != nil && err != db.NoSuchObjectError {
 		return false, "", err
diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go
index 32bdd955e..48d013566 100644
--- a/lxd/storage_volumes.go
+++ b/lxd/storage_volumes.go
@@ -27,14 +27,14 @@ func storagePoolVolumesGet(d *Daemon, r *http.Request) Response {
 
 	// Retrieve ID of the storage pool (and check if the storage pool
 	// exists).
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get all volumes currently attached to the storage pool by ID of the
 	// pool.
-	volumes, err := d.db.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
+	volumes, err := d.cluster.StoragePoolVolumesGet(poolID, supportedVolumeTypes)
 	if err != nil && err != db.NoSuchObjectError {
 		return SmartError(err)
 	}
@@ -94,14 +94,14 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
 
 	// Retrieve ID of the storage pool (and check if the storage pool
 	// exists).
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get the names of all storage volumes of a given volume type currently
 	// attached to the storage pool.
-	volumes, err := d.db.StoragePoolVolumesGetType(volumeType, poolID)
+	volumes, err := d.cluster.StoragePoolVolumesGetType(volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -116,7 +116,7 @@ func storagePoolVolumesTypeGet(d *Daemon, r *http.Request) Response {
 			}
 			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s/volumes/%s/%s", version.APIVersion, poolName, apiEndpoint, volume))
 		} else {
-			_, vol, err := d.db.StoragePoolVolumeGetType(volume, volumeType, poolID)
+			_, vol, err := d.cluster.StoragePoolVolumeGetType(volume, volumeType, poolID)
 			if err != nil {
 				continue
 			}
@@ -222,13 +222,13 @@ func storagePoolVolumeTypePost(d *Daemon, r *http.Request) Response {
 
 	// Retrieve ID of the storage pool (and check if the storage pool
 	// exists).
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Check that the name isn't already in use.
-	_, err = d.db.StoragePoolVolumeGetTypeID(req.Name,
+	_, err = d.cluster.StoragePoolVolumeGetTypeID(req.Name,
 		storagePoolVolumeTypeCustom, poolID)
 	if err == nil || err != nil && err != db.NoSuchObjectError {
 		return Conflict
@@ -272,13 +272,13 @@ func storagePoolVolumeTypeGet(d *Daemon, r *http.Request) Response {
 
 	// Get the ID of the storage pool the storage volume is supposed to be
 	// attached to.
-	poolID, err := d.db.StoragePoolGetID(poolName)
+	poolID, err := d.cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get the storage volume.
-	_, volume, err := d.db.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	_, volume, err := d.cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -316,13 +316,13 @@ func storagePoolVolumeTypePut(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("invalid storage volume type %s", volumeTypeName))
 	}
 
-	poolID, pool, err := d.db.StoragePoolGet(poolName)
+	poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get the existing storage volume.
-	_, volume, err := d.db.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	_, volume, err := d.cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -378,13 +378,13 @@ func storagePoolVolumeTypePatch(d *Daemon, r *http.Request) Response {
 
 	// Get the ID of the storage pool the storage volume is supposed to be
 	// attached to.
-	poolID, pool, err := d.db.StoragePoolGet(poolName)
+	poolID, pool, err := d.cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return SmartError(err)
 	}
 
 	// Get the existing storage volume.
-	_, volume, err := d.db.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
+	_, volume, err := d.cluster.StoragePoolVolumeGetType(volumeName, volumeType, poolID)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index c79b1e461..ef691645a 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -151,14 +151,14 @@ func storagePoolVolumeUpdate(state *state.State, poolName string, volumeName str
 		s.SetStoragePoolVolumeWritable(&newWritable)
 	}
 
-	poolID, err := state.Node.StoragePoolGetID(poolName)
+	poolID, err := state.Cluster.StoragePoolGetID(poolName)
 	if err != nil {
 		return err
 	}
 
 	// Update the database if something changed
 	if len(changedConfig) != 0 || newDescription != oldDescription {
-		err = state.Node.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, newDescription, newConfig)
+		err = state.Cluster.StoragePoolVolumeUpdate(volumeName, volumeType, poolID, newDescription, newConfig)
 		if err != nil {
 			return err
 		}
@@ -302,14 +302,14 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 	}
 
 	// Load storage pool the volume will be attached to.
-	poolID, poolStruct, err := s.Node.StoragePoolGet(poolName)
+	poolID, poolStruct, err := s.Cluster.StoragePoolGet(poolName)
 	if err != nil {
 		return err
 	}
 
 	// Check that a storage volume of the same storage volume type does not
 	// already exist.
-	volumeID, _ := s.Node.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
+	volumeID, _ := s.Cluster.StoragePoolVolumeGetTypeID(volumeName, volumeType, poolID)
 	if volumeID > 0 {
 		return fmt.Errorf("a storage volume of type %s does already exist", volumeTypeName)
 	}
@@ -331,7 +331,7 @@ func storagePoolVolumeDBCreate(s *state.State, poolName string, volumeName, volu
 	}
 
 	// Create the database entry for the storage volume.
-	_, err = s.Node.StoragePoolVolumeCreate(volumeName, volumeDescription, volumeType, poolID, volumeConfig)
+	_, err = s.Cluster.StoragePoolVolumeCreate(volumeName, volumeDescription, volumeType, poolID, volumeConfig)
 	if err != nil {
 		return fmt.Errorf("Error inserting %s of type %s into database: %s", poolName, volumeTypeName, err)
 	}
@@ -361,7 +361,7 @@ func storagePoolVolumeCreateInternal(state *state.State, poolName string, volume
 	// Create storage volume.
 	err = s.StoragePoolVolumeCreate()
 	if err != nil {
-		state.Node.StoragePoolVolumeDelete(volumeName, volumeType, poolID)
+		state.Cluster.StoragePoolVolumeDelete(volumeName, volumeType, poolID)
 		return err
 	}
 
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 30a616b93..021e5b13e 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -436,7 +436,7 @@ func (s *storageZfs) StoragePoolVolumeDelete() error {
 		}
 	}
 
-	err := s.db.StoragePoolVolumeDelete(
+	err := s.s.Cluster.StoragePoolVolumeDelete(
 		s.volume.Name,
 		storagePoolVolumeTypeCustom,
 		s.poolID)
@@ -640,7 +640,7 @@ func (s *storageZfs) StoragePoolVolumeRename(newName string) error {
 	logger.Infof(`Renamed ZFS storage volume on storage pool "%s" from "%s" to "%s`,
 		s.pool.Name, s.volume.Name, newName)
 
-	return s.db.StoragePoolVolumeRename(s.volume.Name, newName,
+	return s.s.Cluster.StoragePoolVolumeRename(s.volume.Name, newName,
 		storagePoolVolumeTypeCustom, s.poolID)
 }
 
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index f02cb36d7..511446a53 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -195,16 +195,16 @@ kill_lxd() {
         check_empty_table "${daemon_dir}/lxd.db" "profiles_config"
         check_empty_table "${daemon_dir}/lxd.db" "profiles_devices"
         check_empty_table "${daemon_dir}/lxd.db" "profiles_devices_config"
-        check_empty_table "${daemon_dir}/lxd.db" "storage_pools"
-        check_empty_table "${daemon_dir}/lxd.db" "storage_pools_config"
-        check_empty_table "${daemon_dir}/lxd.db" "storage_volumes"
-        check_empty_table "${daemon_dir}/lxd.db" "storage_volumes_config"
 
         echo "==> Checking for leftover cluster DB entries"
 	# FIXME: we should not use the command line sqlite client, since it's
         #        not compatible with dqlite
         check_empty_table "${daemon_dir}/raft/db.bin" "networks"
         check_empty_table "${daemon_dir}/raft/db.bin" "networks_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes_config"
     fi
 
     # teardown storage
diff --git a/test/suites/backup.sh b/test/suites/backup.sh
index 150c14be0..84b304836 100644
--- a/test/suites/backup.sh
+++ b/test/suites/backup.sh
@@ -18,7 +18,7 @@ test_container_import() {
     lxd import ctImport --force
     kill -9 "${pid}"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     lxd import ctImport --force
     lxc start ctImport
     lxc delete --force ctImport
@@ -65,7 +65,7 @@ test_container_import() {
     kill -9 "${pid}"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
@@ -79,8 +79,8 @@ test_container_import() {
     kill -9 "${pid}"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport/snap0'"
     lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
@@ -100,7 +100,7 @@ test_container_import() {
     kill -9 "${pid}"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index 4af380d32..a1f00a835 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,12 +9,12 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=22
+  expected_tables=18
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 
-  # There should be 15 "ON DELETE CASCADE" occurrences
-  expected_cascades=14
+  # There should be 11 "ON DELETE CASCADE" occurrences
+  expected_cascades=11
   cascades=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "ON DELETE CASCADE")
   [ "${cascades}" -eq "${expected_cascades}" ] || { echo "FAIL: Wrong number of ON DELETE CASCADE foreign keys. Found: ${cascades}, exected: ${expected_cascades}"; false; }
 

From 2344d06c1e832d6e16acb54ee53f100c9cf6c8e8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 08:45:21 +0000
Subject: [PATCH 058/116] Ask about storage pools configs when joining a
 cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go         | 13 +++++++++++++
 lxd/main_init.go           | 23 +++++++++++++++++++++++
 lxd/main_init_test.go      | 14 ++++++++++++--
 shared/api/cluster.go      |  3 ++-
 shared/cmd/context.go      |  4 +++-
 shared/cmd/context_test.go |  1 +
 6 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 28b3c46f7..66641ef50 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -46,6 +46,19 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 		cluster.Networks = append(cluster.Networks, *network)
 	}
 
+	// Fill the StoragePools attribute
+	pools, err := d.cluster.StoragePools()
+	if err != nil {
+		return SmartError(err)
+	}
+	for _, name := range pools {
+		_, pool, err := d.cluster.StoragePoolGet(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		cluster.StoragePools = append(cluster.StoragePools, *pool)
+	}
+
 	return SyncResponse(true, cluster)
 }
 
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 2116c02e6..686bc9198 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -183,6 +183,10 @@ func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerS
 			if err != nil {
 				return err
 			}
+			data.Pools, err = cmd.askClusteringStoragePools(cluster)
+			if err != nil {
+				return err
+			}
 			data.Networks, err = cmd.askClusteringNetworks(cluster)
 			if err != nil {
 				return err
@@ -836,6 +840,25 @@ join:
 	return params, nil
 }
 
+func (cmd *CmdInit) askClusteringStoragePools(cluster *api.Cluster) ([]api.StoragePoolsPost, error) {
+	pools := make([]api.StoragePoolsPost, len(cluster.StoragePools))
+	for i, pool := range cluster.StoragePools {
+		post := api.StoragePoolsPost{}
+		post.Name = pool.Name
+		post.Driver = pool.Driver
+		post.Config = pool.Config
+		// The only config key to ask about is 'source', which is the only node-specific one.
+		key := "source"
+		question := fmt.Sprintf(
+			`Enter local value for key "%s" of storage pool "%s": `, key, post.Name)
+		// Dummy validator for allowing empty strings.
+		validator := func(string) error { return nil }
+		post.Config[key] = cmd.Context.AskString(question, "", validator)
+		pools[i] = post
+	}
+	return pools, nil
+}
+
 func (cmd *CmdInit) askClusteringNetworks(cluster *api.Cluster) ([]api.NetworksPost, error) {
 	networks := make([]api.NetworksPost, len(cluster.Networks))
 	for i, network := range cluster.Networks {
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 6eef12c19..873521f38 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -170,13 +170,22 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 		"ipv4.nat": "true",
 	}
 	client := f.ClientUnix(leader)
-	client.CreateNetwork(network)
+	suite.Req.NoError(client.CreateNetwork(network))
+
+	pool := api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	pool.Config = map[string]string{
+		"source": "",
+	}
+	suite.Req.NoError(client.CreateStoragePool(pool))
 
 	suite.command.PasswordReader = func(int) ([]byte, error) {
 		return []byte("sekret"), nil
 	}
 	port, err := shared.AllocatePort()
-	suite.Req.Nil(err)
+	suite.Req.NoError(err)
 	answers := &cmdInitAnswers{
 		WantClustering:           true,
 		ClusterName:              "rusp",
@@ -186,6 +195,7 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 		ClusterAcceptFingerprint: true,
 		ClusterConfirmLosingData: true,
 		ClusterConfig: []string{
+			"",               // storage source
 			"10.23.189.2/24", // ipv4.address
 			"true",           // ipv4.nat
 			"aaaa:bbbb:cccc:dddd::1/64", // ipv6.address
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 5000394c5..045411d64 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -2,7 +2,8 @@ package api
 
 // Cluster represents high-level information about a LXD cluster.
 type Cluster struct {
-	Networks []Network
+	StoragePools []StoragePool
+	Networks     []Network
 }
 
 // ClusterPost represents the fields required to bootstrap or join a LXD
diff --git a/shared/cmd/context.go b/shared/cmd/context.go
index 0caa946b4..94979b167 100644
--- a/shared/cmd/context.go
+++ b/shared/cmd/context.go
@@ -3,13 +3,14 @@ package cmd
 import (
 	"bufio"
 	"fmt"
-	"gopkg.in/yaml.v2"
 	"io"
 	"io/ioutil"
 	"os"
 	"strconv"
 	"strings"
 
+	"gopkg.in/yaml.v2"
+
 	"github.com/lxc/lxd/shared"
 )
 
@@ -101,6 +102,7 @@ func (c *Context) AskString(question string, defaultAnswer string, validate func
 				fmt.Fprintf(c.stderr, "Invalid input: %s\n\n", error)
 				continue
 			}
+			return answer
 		}
 		if len(answer) != 0 {
 			return answer
diff --git a/shared/cmd/context_test.go b/shared/cmd/context_test.go
index 1e4b0c6bc..7f73e57fd 100644
--- a/shared/cmd/context_test.go
+++ b/shared/cmd/context_test.go
@@ -129,6 +129,7 @@ func TestAskString(t *testing.T) {
 			}
 			return nil
 		}, "Name?Name?", "Invalid input: ugly name\n\n", "Ted\nJohn", "John"},
+		{"Name?", "", func(string) error { return nil }, "Name?", "", "\n", ""},
 	}
 	for _, c := range cases {
 		streams := cmd.NewMemoryStreams(c.input)

From 361abd7237ada43e3f8b773b1a5e4771241941e8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 08:55:00 +0000
Subject: [PATCH 059/116] Add local storage pools to cluster database when
 joining

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go | 41 +++++++++++++++++++++++++++-----
 lxd/db/storage_pools.go   | 59 ++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 91 insertions(+), 9 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index e763f110a..1d0f6e4b2 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -229,12 +229,20 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	}
 
 	// Get the local config keys for the cluster networks. It assumes that
-	// the local networks match the cluster networks, if not an error will
-	// be returned.
+	// the local storage pools and networks match the cluster pools and
+	// not an error will be returned.
+	var pools map[string]map[string]string
 	var networks map[string]map[string]string
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		pools, err = tx.StoragePoolConfigs()
+		if err != nil {
+			return err
+		}
 		networks, err = tx.NetworkConfigs()
-		return err
+		if err != nil {
+			return err
+		}
+		return nil
 	})
 	if err != nil {
 		return err
@@ -285,15 +293,36 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	// Make sure we can actually connect to the cluster database through
 	// the network endpoint. This also makes the Go SQL pooling system
 	// invalidate the old connection, so new queries will be executed over
-	// the new gRPC network connection. Also, update the networks table
-	// with our local configuration.
+	// the new gRPC network connection. Also, update the storage_pools and
+	// networks tables with our local configuration.
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		node, err := tx.Node(address)
 		if err != nil {
 			return errors.Wrap(err, "failed to get ID of joining node")
 		}
 		state.Cluster.ID(node.ID)
-		ids, err := tx.NetworkIDs()
+
+		// Storage pools.
+		ids, err := tx.StoragePoolIDs()
+		if err != nil {
+			return errors.Wrap(err, "failed to get cluster storage pool IDs")
+		}
+		for name, id := range ids {
+			config, ok := pools[name]
+			if !ok {
+				return fmt.Errorf("joining node has no config for pool %s", name)
+			}
+			// We only need to add the source key, since the other keys are global and
+			// are already there.
+			config = map[string]string{"source": config["source"]}
+			err := tx.StoragePoolConfigAdd(id, node.ID, config)
+			if err != nil {
+				return errors.Wrap(err, "failed to add joining node's pool config")
+			}
+		}
+
+		// Networks.
+		ids, err = tx.NetworkIDs()
 		if err != nil {
 			return errors.Wrap(err, "failed to get cluster network IDs")
 		}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 99392f90c..ad2fad60e 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -6,9 +6,62 @@ import (
 
 	_ "github.com/mattn/go-sqlite3"
 
+	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/shared/api"
 )
 
+// StoragePoolConfigs returns a map associating each storage pool name to its
+// config values.
+func (c *ClusterTx) StoragePoolConfigs() (map[string]map[string]string, error) {
+	names, err := query.SelectStrings(c.tx, "SELECT name FROM storage_pools")
+	if err != nil {
+		return nil, err
+	}
+	pools := make(map[string]map[string]string, len(names))
+	for _, name := range names {
+		table := `
+storage_pools_config JOIN storage_pools ON storage_pools.id=storage_pools_config.storage_pool_id
+`
+		filter := fmt.Sprintf("storage_pools.name='%s'", name)
+		config, err := query.SelectConfig(c.tx, table, filter)
+		if err != nil {
+			return nil, err
+		}
+		pools[name] = config
+	}
+	return pools, nil
+}
+
+// StoragePoolIDs returns a map associating each storage pool name to its ID.
+func (c *ClusterTx) StoragePoolIDs() (map[string]int64, error) {
+	pools := []struct {
+		id   int64
+		name string
+	}{}
+	dest := func(i int) []interface{} {
+		pools = append(pools, struct {
+			id   int64
+			name string
+		}{})
+		return []interface{}{&pools[i].id, &pools[i].name}
+
+	}
+	err := query.SelectObjects(c.tx, dest, "SELECT id, name FROM storage_pools")
+	if err != nil {
+		return nil, err
+	}
+	ids := map[string]int64{}
+	for _, pool := range pools {
+		ids[pool.name] = pool.id
+	}
+	return ids, nil
+}
+
+// StoragePoolConfigAdd adds a new entry in the storage_pools_config table
+func (c *ClusterTx) StoragePoolConfigAdd(poolID, nodeID int64, config map[string]string) error {
+	return storagePoolConfigAdd(c.tx, poolID, nodeID, config)
+}
+
 // Get all storage pools.
 func (c *Cluster) StoragePools() ([]string, error) {
 	var name string
@@ -150,7 +203,7 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 		return -1, err
 	}
 
-	err = StoragePoolConfigAdd(tx, id, c.id, poolConfig)
+	err = storagePoolConfigAdd(tx, id, c.id, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -165,7 +218,7 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 }
 
 // Add new storage pool config.
-func StoragePoolConfigAdd(tx *sql.Tx, poolID, nodeID int64, poolConfig map[string]string) error {
+func storagePoolConfigAdd(tx *sql.Tx, poolID, nodeID int64, poolConfig map[string]string) error {
 	str := "INSERT INTO storage_pools_config (storage_pool_id, node_id, key, value) VALUES(?, ?, ?, ?)"
 	stmt, err := tx.Prepare(str)
 	defer stmt.Close()
@@ -217,7 +270,7 @@ func (c *Cluster) StoragePoolUpdate(poolName, description string, poolConfig map
 		return err
 	}
 
-	err = StoragePoolConfigAdd(tx, poolID, c.id, poolConfig)
+	err = storagePoolConfigAdd(tx, poolID, c.id, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return err

From 40f677d8d258e7316239bdbd6b7bb8885a5d3c17 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 09:27:11 +0000
Subject: [PATCH 060/116] Add certificates table to cluster database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      |  8 ++++++++
 lxd/db/cluster/update.go      |  8 ++++++++
 lxd/db/cluster/update_test.go | 13 +++++++++++++
 3 files changed, 29 insertions(+)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index f6a8c1e1e..0141dc5cb 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -6,6 +6,14 @@ package cluster
 // modify the database schema, please add a new schema update to update.go
 // and the run 'make update-schema'.
 const freshSchema = `
+CREATE TABLE certificates (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    type INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    certificate TEXT NOT NULL,
+    UNIQUE (fingerprint)
+);
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     key TEXT NOT NULL,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index f63d88533..497280c01 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -30,6 +30,14 @@ var updates = map[int]schema.Update{
 func updateFromV1(tx *sql.Tx) error {
 	// config table
 	stmt := `
+CREATE TABLE certificates (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    type INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    certificate TEXT NOT NULL,
+    UNIQUE (fingerprint)
+);
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     key TEXT NOT NULL,
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index 7c65fa9da..36a2a5a7b 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -31,6 +31,19 @@ func TestUpdateFromV0(t *testing.T) {
 	require.Error(t, err)
 }
 
+func TestUpdateFromV1_Certificates(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO certificates VALUES (1, 'abcd:efgh', 1, 'foo', 'FOO')")
+	require.NoError(t, err)
+
+	// Unique constraint on fingerprint.
+	_, err = db.Exec("INSERT INTO certificates VALUES (2, 'abcd:efgh', 2, 'bar', 'BAR')")
+	require.Error(t, err)
+}
+
 func TestUpdateFromV1_Config(t *testing.T) {
 	schema := cluster.Schema()
 	db, err := schema.ExerciseUpdate(2, nil)

From a37e3310385586e320d8da4ff7f934d96a0a34c2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 09:44:33 +0000
Subject: [PATCH 061/116] Migrate certificates data to cluster database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/certificates.go            | 22 +++++++++++-----------
 lxd/db/certificates.go         | 20 ++++++++++----------
 lxd/db/migration.go            |  1 +
 lxd/db/migration_test.go       | 12 ++++++++++++
 lxd/db/node/schema.go          |  8 --------
 lxd/db/node/update.go          |  1 +
 test/suites/database_update.sh |  2 +-
 7 files changed, 36 insertions(+), 30 deletions(-)

diff --git a/lxd/certificates.go b/lxd/certificates.go
index fd3c2ea6c..e51e6a88f 100644
--- a/lxd/certificates.go
+++ b/lxd/certificates.go
@@ -26,7 +26,7 @@ func certificatesGet(d *Daemon, r *http.Request) Response {
 	if recursion {
 		certResponses := []api.Certificate{}
 
-		baseCerts, err := d.db.CertificatesGet()
+		baseCerts, err := d.cluster.CertificatesGet()
 		if err != nil {
 			return SmartError(err)
 		}
@@ -57,7 +57,7 @@ func certificatesGet(d *Daemon, r *http.Request) Response {
 func readSavedClientCAList(d *Daemon) {
 	d.clientCerts = []x509.Certificate{}
 
-	dbCerts, err := d.db.CertificatesGet()
+	dbCerts, err := d.cluster.CertificatesGet()
 	if err != nil {
 		logger.Infof("Error reading certificates from database: %s", err)
 		return
@@ -79,7 +79,7 @@ func readSavedClientCAList(d *Daemon) {
 	}
 }
 
-func saveCert(dbObj *db.Node, host string, cert *x509.Certificate) error {
+func saveCert(dbObj *db.Cluster, host string, cert *x509.Certificate) error {
 	baseCert := new(db.CertInfo)
 	baseCert.Fingerprint = shared.CertFingerprint(cert)
 	baseCert.Type = 1
@@ -148,7 +148,7 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	err = saveCert(d.db, name, cert)
+	err = saveCert(d.cluster, name, cert)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -163,7 +163,7 @@ var certificatesCmd = Command{name: "certificates", untrustedPost: true, get: ce
 func certificateFingerprintGet(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
-	cert, err := doCertificateGet(d.db, fingerprint)
+	cert, err := doCertificateGet(d.cluster, fingerprint)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -171,7 +171,7 @@ func certificateFingerprintGet(d *Daemon, r *http.Request) Response {
 	return SyncResponseETag(true, cert, cert)
 }
 
-func doCertificateGet(db *db.Node, fingerprint string) (api.Certificate, error) {
+func doCertificateGet(db *db.Cluster, fingerprint string) (api.Certificate, error) {
 	resp := api.Certificate{}
 
 	dbCertInfo, err := db.CertificateGet(fingerprint)
@@ -194,7 +194,7 @@ func doCertificateGet(db *db.Node, fingerprint string) (api.Certificate, error)
 func certificateFingerprintPut(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
-	oldEntry, err := doCertificateGet(d.db, fingerprint)
+	oldEntry, err := doCertificateGet(d.cluster, fingerprint)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -216,7 +216,7 @@ func certificateFingerprintPut(d *Daemon, r *http.Request) Response {
 func certificateFingerprintPatch(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
-	oldEntry, err := doCertificateGet(d.db, fingerprint)
+	oldEntry, err := doCertificateGet(d.cluster, fingerprint)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -253,7 +253,7 @@ func doCertificateUpdate(d *Daemon, fingerprint string, req api.CertificatePut)
 		return BadRequest(fmt.Errorf("Unknown request type %s", req.Type))
 	}
 
-	err := d.db.CertUpdate(fingerprint, req.Name, 1)
+	err := d.cluster.CertUpdate(fingerprint, req.Name, 1)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -264,12 +264,12 @@ func doCertificateUpdate(d *Daemon, fingerprint string, req api.CertificatePut)
 func certificateFingerprintDelete(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
 
-	certInfo, err := d.db.CertificateGet(fingerprint)
+	certInfo, err := d.cluster.CertificateGet(fingerprint)
 	if err != nil {
 		return NotFound
 	}
 
-	err = d.db.CertDelete(certInfo.Fingerprint)
+	err = d.cluster.CertDelete(certInfo.Fingerprint)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/db/certificates.go b/lxd/db/certificates.go
index ebfd0224f..e773685e5 100644
--- a/lxd/db/certificates.go
+++ b/lxd/db/certificates.go
@@ -11,9 +11,9 @@ type CertInfo struct {
 }
 
 // CertificatesGet returns all certificates from the DB as CertBaseInfo objects.
-func (n *Node) CertificatesGet() (certs []*CertInfo, err error) {
+func (c *Cluster) CertificatesGet() (certs []*CertInfo, err error) {
 	rows, err := dbQuery(
-		n.db,
+		c.db,
 		"SELECT id, fingerprint, type, name, certificate FROM certificates",
 	)
 	if err != nil {
@@ -42,7 +42,7 @@ func (n *Node) CertificatesGet() (certs []*CertInfo, err error) {
 // pass a shortform and will get the full fingerprint.
 // There can never be more than one image with a given fingerprint, as it is
 // enforced by a UNIQUE constraint in the schema.
-func (n *Node) CertificateGet(fingerprint string) (cert *CertInfo, err error) {
+func (c *Cluster) CertificateGet(fingerprint string) (cert *CertInfo, err error) {
 	cert = new(CertInfo)
 
 	inargs := []interface{}{fingerprint + "%"}
@@ -61,7 +61,7 @@ func (n *Node) CertificateGet(fingerprint string) (cert *CertInfo, err error) {
 			certificates
 		WHERE fingerprint LIKE ?`
 
-	if err = dbQueryRowScan(n.db, query, inargs, outfmt); err != nil {
+	if err = dbQueryRowScan(c.db, query, inargs, outfmt); err != nil {
 		return nil, err
 	}
 
@@ -70,8 +70,8 @@ func (n *Node) CertificateGet(fingerprint string) (cert *CertInfo, err error) {
 
 // CertSave stores a CertBaseInfo object in the db,
 // it will ignore the ID field from the CertInfo.
-func (n *Node) CertSave(cert *CertInfo) error {
-	tx, err := begin(n.db)
+func (c *Cluster) CertSave(cert *CertInfo) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -103,8 +103,8 @@ func (n *Node) CertSave(cert *CertInfo) error {
 }
 
 // CertDelete deletes a certificate from the db.
-func (n *Node) CertDelete(fingerprint string) error {
-	_, err := exec(n.db, "DELETE FROM certificates WHERE fingerprint=?", fingerprint)
+func (c *Cluster) CertDelete(fingerprint string) error {
+	_, err := exec(c.db, "DELETE FROM certificates WHERE fingerprint=?", fingerprint)
 	if err != nil {
 		return err
 	}
@@ -112,8 +112,8 @@ func (n *Node) CertDelete(fingerprint string) error {
 	return nil
 }
 
-func (n *Node) CertUpdate(fingerprint string, certName string, certType int) error {
-	tx, err := begin(n.db)
+func (c *Cluster) CertUpdate(fingerprint string, certName string, certType int) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index f9d2a7f64..9744bb8a3 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -135,6 +135,7 @@ type Dump struct {
 }
 
 var preClusteringTables = []string{
+	"certificates",
 	"config",
 	"networks",
 	"networks_config",
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index b590907b4..8d2b392a7 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -40,6 +40,17 @@ func TestImportPreClusteringData(t *testing.T) {
 	err = cluster.ImportPreClusteringData(dump)
 	require.NoError(t, err)
 
+	// certificates
+	certs, err := cluster.CertificatesGet()
+	require.NoError(t, err)
+	assert.Len(t, certs, 1)
+	cert := certs[0]
+	assert.Equal(t, 1, cert.ID)
+	assert.Equal(t, "abcd:efgh", cert.Fingerprint)
+	assert.Equal(t, 1, cert.Type)
+	assert.Equal(t, "foo", cert.Name)
+	assert.Equal(t, "FOO", cert.Certificate)
+
 	// config
 	err = cluster.Transaction(func(tx *db.ClusterTx) error {
 		config, err := tx.Config()
@@ -85,6 +96,7 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 
 	stmts := []string{
 		preClusteringNodeSchema,
+		"INSERT INTO certificates VALUES (1, 'abcd:efgh', 1, 'foo', 'FOO')",
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
 		"INSERT INTO networks VALUES(1, 'lxcbr0', 'LXD bridge')",
 		"INSERT INTO networks_config VALUES(1, 1, 'ipv4.nat', 'true')",
diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index c8105de7f..fd88a6ada 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -6,14 +6,6 @@ package node
 // modify the database schema, please add a new schema update to update.go
 // and the run 'make update-schema'.
 const freshSchema = `
-CREATE TABLE certificates (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    fingerprint VARCHAR(255) NOT NULL,
-    type INTEGER NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    certificate TEXT NOT NULL,
-    UNIQUE (fingerprint)
-);
 CREATE TABLE config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     key VARCHAR(255) NOT NULL,
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 1e20ca8ba..1153ecf8f 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -118,6 +118,7 @@ CREATE TABLE raft_nodes (
     UNIQUE (address)
 );
 DELETE FROM config WHERE NOT key='core.https_address';
+DROP TABLE certificates;
 DROP TABLE networks_config;
 DROP TABLE networks;
 DROP TABLE storage_volumes_config;
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index a1f00a835..b8b46301f 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,7 +9,7 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=18
+  expected_tables=17
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 

From bf29892fcc38b491769711e2586092632fb19d3d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 25 Oct 2017 10:00:04 +0000
Subject: [PATCH 062/116] Add containers, images and profiles tables to the
 cluster database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      | 125 ++++++++++++++++++++++++++++++++++++++++++
 lxd/db/cluster/update.go      | 125 ++++++++++++++++++++++++++++++++++++++++++
 lxd/db/cluster/update_test.go |  23 ++++++++
 3 files changed, 273 insertions(+)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 0141dc5cb..ef7d93de5 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -20,6 +20,101 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
+CREATE TABLE containers (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    node_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    architecture INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    ephemeral INTEGER NOT NULL DEFAULT 0,
+    creation_date DATETIME NOT NULL DEFAULT 0,
+    stateful INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    description TEXT,
+    UNIQUE (name),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE containers_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, key)
+);
+CREATE TABLE containers_devices (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, name)
+);
+CREATE TABLE containers_devices_config (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE,
+    UNIQUE (container_device_id, key)
+);
+CREATE TABLE containers_profiles (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    profile_id INTEGER NOT NULL,
+    apply_order INTEGER NOT NULL default 0,
+    UNIQUE (container_id, profile_id),
+    FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE,
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE images (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    filename TEXT NOT NULL,
+    size INTEGER NOT NULL,
+    public INTEGER NOT NULL DEFAULT 0,
+    architecture INTEGER NOT NULL,
+    creation_date DATETIME,
+    expiry_date DATETIME,
+    upload_date DATETIME NOT NULL,
+    cached INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    auto_update INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (fingerprint)
+);
+CREATE TABLE "images_aliases" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    image_id INTEGER NOT NULL,
+    description TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    UNIQUE (name)
+);
+CREATE TABLE images_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (image_id, node_id),
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE images_properties (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE images_source (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    server TEXT NOT NULL,
+    protocol INTEGER NOT NULL,
+    certificate TEXT NOT NULL,
+    alias TEXT NOT NULL,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
 CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
@@ -47,6 +142,36 @@ CREATE TABLE nodes (
     UNIQUE (name),
     UNIQUE (address)
 );
+CREATE TABLE profiles (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE profiles_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_id, key),
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    UNIQUE (profile_id, name),
+    FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_device_id, key),
+    FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE
+);
 CREATE TABLE storage_pools (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 497280c01..c41311e6d 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -44,6 +44,101 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
+CREATE TABLE containers (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    node_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    architecture INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    ephemeral INTEGER NOT NULL DEFAULT 0,
+    creation_date DATETIME NOT NULL DEFAULT 0,
+    stateful INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    description TEXT,
+    UNIQUE (name),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+CREATE TABLE containers_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, key)
+);
+CREATE TABLE containers_devices (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
+    UNIQUE (container_id, name)
+);
+CREATE TABLE containers_devices_config (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE,
+    UNIQUE (container_device_id, key)
+);
+CREATE TABLE containers_profiles (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    container_id INTEGER NOT NULL,
+    profile_id INTEGER NOT NULL,
+    apply_order INTEGER NOT NULL default 0,
+    UNIQUE (container_id, profile_id),
+    FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE,
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE images (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    fingerprint TEXT NOT NULL,
+    filename TEXT NOT NULL,
+    size INTEGER NOT NULL,
+    public INTEGER NOT NULL DEFAULT 0,
+    architecture INTEGER NOT NULL,
+    creation_date DATETIME,
+    expiry_date DATETIME,
+    upload_date DATETIME NOT NULL,
+    cached INTEGER NOT NULL DEFAULT 0,
+    last_use_date DATETIME,
+    auto_update INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (fingerprint)
+);
+CREATE TABLE "images_aliases" (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    image_id INTEGER NOT NULL,
+    description TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    UNIQUE (name)
+);
+CREATE TABLE images_properties (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE images_source (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    server TEXT NOT NULL,
+    protocol INTEGER NOT NULL,
+    certificate TEXT NOT NULL,
+    alias TEXT NOT NULL,
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
+);
+CREATE TABLE images_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    image_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (image_id, node_id),
+    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 CREATE TABLE networks (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
@@ -60,6 +155,36 @@ CREATE TABLE networks_config (
     FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE,
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
+CREATE TABLE profiles (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    name TEXT NOT NULL,
+    description TEXT,
+    UNIQUE (name)
+);
+CREATE TABLE profiles_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_id, key),
+    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_id INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    type INTEGER NOT NULL default 0,
+    UNIQUE (profile_id, name),
+    FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE
+);
+CREATE TABLE profiles_devices_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    profile_device_id INTEGER NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    UNIQUE (profile_device_id, key),
+    FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE
+);
 CREATE TABLE storage_pools (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index 36a2a5a7b..b6f44ca28 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -57,6 +57,29 @@ func TestUpdateFromV1_Config(t *testing.T) {
 	require.Error(t, err)
 }
 
+func TestUpdateFromV1_Containers(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(2, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'one', '', '1.1.1.1', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (2, 'two', '', '2.2.2.2', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
+	_, err = db.Exec(`
+INSERT INTO containers VALUES (1, 1, 'bionic', 1, 1, 0, ?, 0, ?, 'Bionic Beaver')
+`, time.Now(), time.Now())
+	require.NoError(t, err)
+
+	// Unique constraint on name
+	_, err = db.Exec(`
+INSERT INTO containers VALUES (2, 2, 'bionic', 2, 2, 1, ?, 1, ?, 'Ubuntu LTS')
+`, time.Now(), time.Now())
+	require.Error(t, err)
+}
+
 func TestUpdateFromV1_Network(t *testing.T) {
 	schema := cluster.Schema()
 	db, err := schema.ExerciseUpdate(2, nil)

From 2900419c2faf674a60d9f106033167b5a22cc14a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 26 Oct 2017 13:35:26 +0000
Subject: [PATCH 063/116] Move containers, images and profiles tables to the
 cluster database

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_internal.go            |   8 +-
 lxd/container.go               |  34 ++++----
 lxd/container_lxc.go           |  50 ++++++------
 lxd/container_post.go          |   2 +-
 lxd/container_snapshot.go      |   4 +-
 lxd/container_test.go          |   4 +-
 lxd/containers.go              |   8 +-
 lxd/containers_get.go          |   2 +-
 lxd/containers_post.go         |  14 ++--
 lxd/daemon.go                  |  28 +++++--
 lxd/daemon_images.go           |  14 ++--
 lxd/daemon_images_test.go      |   6 +-
 lxd/db/cluster/open.go         |  10 ++-
 lxd/db/cluster/update_test.go  |  12 +++
 lxd/db/containers.go           |  82 +++++++++----------
 lxd/db/db.go                   |  18 ++++-
 lxd/db/db_internal_test.go     | 176 +++--------------------------------------
 lxd/db/devices.go              |   6 +-
 lxd/db/images.go               | 112 +++++++++++++-------------
 lxd/db/migration.go            |  41 ++++++++++
 lxd/db/migration_test.go       |   1 +
 lxd/db/node/schema.go          |  91 ---------------------
 lxd/db/node/update.go          |  10 +++
 lxd/db/profiles.go             |  46 +++++------
 lxd/devices.go                 |   6 +-
 lxd/devlxd.go                  |   2 +-
 lxd/images.go                  | 110 +++++++++++++-------------
 lxd/logging.go                 |   2 +-
 lxd/main_activateifneeded.go   |  44 ++++++++++-
 lxd/main_sql.go                |   2 +-
 lxd/main_test.go               |   4 +-
 lxd/networks.go                |   4 +-
 lxd/networks_utils.go          |   2 +-
 lxd/patches.go                 |  42 +++++-----
 lxd/profiles.go                |  22 +++---
 lxd/profiles_test.go           |  34 +++-----
 lxd/profiles_utils.go          |   4 +-
 lxd/storage_lvm_utils.go       |   4 +-
 lxd/storage_pools.go           |   2 +-
 lxd/storage_pools_utils.go     |   4 +-
 lxd/storage_volumes_utils.go   |   6 +-
 test/includes/lxd.sh           |  29 ++++---
 test/suites/backup.sh          |  18 ++---
 test/suites/database_update.sh |   4 +-
 test/suites/image.sh           |   2 +-
 test/suites/profiling.sh       |   3 +-
 46 files changed, 505 insertions(+), 624 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 07d8e9ec3..0c2ca82d6 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -589,7 +589,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check if an entry for the container already exists in the db.
-	_, containerErr := d.db.ContainerId(req.Name)
+	_, containerErr := d.cluster.ContainerId(req.Name)
 	if containerErr != nil {
 		if containerErr != sql.ErrNoRows {
 			return SmartError(containerErr)
@@ -634,7 +634,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 	if containerErr == nil {
 		// Remove the storage volume db entry for the container since
 		// force was specified.
-		err := d.db.ContainerRemove(req.Name)
+		err := d.cluster.ContainerRemove(req.Name)
 		if err != nil {
 			return SmartError(err)
 		}
@@ -642,7 +642,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 
 	for _, snap := range existingSnapshots {
 		// Check if an entry for the snapshot already exists in the db.
-		_, snapErr := d.db.ContainerId(snap.Name)
+		_, snapErr := d.cluster.ContainerId(snap.Name)
 		if snapErr != nil {
 			if snapErr != sql.ErrNoRows {
 				return SmartError(snapErr)
@@ -673,7 +673,7 @@ func internalImport(d *Daemon, r *http.Request) Response {
 		}
 
 		if snapErr == nil {
-			err := d.db.ContainerRemove(snap.Name)
+			err := d.cluster.ContainerRemove(snap.Name)
 			if err != nil {
 				return SmartError(err)
 			}
diff --git a/lxd/container.go b/lxd/container.go
index 708d30552..f205c395c 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -549,7 +549,7 @@ func containerCreateAsEmpty(d *Daemon, args db.ContainerArgs) (container, error)
 	// Now create the empty storage
 	err = c.Storage().ContainerCreate(c)
 	if err != nil {
-		d.db.ContainerRemove(args.Name)
+		d.cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -573,7 +573,7 @@ func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (contai
 	// Now create the empty snapshot
 	err = c.Storage().ContainerSnapshotCreateEmpty(c)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -582,7 +582,7 @@ func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (contai
 
 func containerCreateFromImage(s *state.State, args db.ContainerArgs, hash string) (container, error) {
 	// Get the image properties
-	_, img, err := s.Node.ImageGet(hash, false, false)
+	_, img, err := s.Cluster.ImageGet(hash, false, false)
 	if err != nil {
 		return nil, err
 	}
@@ -603,16 +603,16 @@ func containerCreateFromImage(s *state.State, args db.ContainerArgs, hash string
 		return nil, err
 	}
 
-	err = s.Node.ImageLastAccessUpdate(hash, time.Now().UTC())
+	err = s.Cluster.ImageLastAccessUpdate(hash, time.Now().UTC())
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, fmt.Errorf("Error updating image last use date: %s", err)
 	}
 
 	// Now create the storage from an image
 	err = c.Storage().ContainerCreateFromImage(c, hash)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -637,7 +637,7 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	if !containerOnly {
 		snapshots, err := sourceContainer.Snapshots()
 		if err != nil {
-			s.Node.ContainerRemove(args.Name)
+			s.Cluster.ContainerRemove(args.Name)
 			return nil, err
 		}
 
@@ -669,9 +669,9 @@ func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContaine
 	err = ct.Storage().ContainerCopy(ct, sourceContainer, containerOnly)
 	if err != nil {
 		for _, v := range csList {
-			s.Node.ContainerRemove((*v).Name())
+			s.Cluster.ContainerRemove((*v).Name())
 		}
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -750,7 +750,7 @@ func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceCont
 	// Clone the container
 	err = sourceContainer.Storage().ContainerSnapshotCreate(c, sourceContainer)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -829,7 +829,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Validate profiles
-	profiles, err := s.Node.Profiles()
+	profiles, err := s.Cluster.Profiles()
 	if err != nil {
 		return nil, err
 	}
@@ -841,7 +841,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	}
 
 	// Create the container entry
-	id, err := s.Node.ContainerCreate(args)
+	id, err := s.Cluster.ContainerCreate(args)
 	if err != nil {
 		if err == db.DbErrAlreadyDefined {
 			thing := "Container"
@@ -859,9 +859,9 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	args.Id = id
 
 	// Read the timestamp from the database
-	dbArgs, err := s.Node.ContainerGet(args.Name)
+	dbArgs, err := s.Cluster.ContainerGet(args.Name)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 	args.CreationDate = dbArgs.CreationDate
@@ -870,7 +870,7 @@ func containerCreateInternal(s *state.State, args db.ContainerArgs) (container,
 	// Setup the container struct and finish creation (storage and idmap)
 	c, err := containerLXCCreate(s, args)
 	if err != nil {
-		s.Node.ContainerRemove(args.Name)
+		s.Cluster.ContainerRemove(args.Name)
 		return nil, err
 	}
 
@@ -925,7 +925,7 @@ func containerConfigureInternal(c container) error {
 
 func containerLoadById(s *state.State, id int) (container, error) {
 	// Get the DB record
-	name, err := s.Node.ContainerName(id)
+	name, err := s.Cluster.ContainerName(id)
 	if err != nil {
 		return nil, err
 	}
@@ -935,7 +935,7 @@ func containerLoadById(s *state.State, id int) (container, error) {
 
 func containerLoadByName(s *state.State, name string) (container, error) {
 	// Get the DB record
-	args, err := s.Node.ContainerGet(name)
+	args, err := s.Cluster.ContainerGet(name)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index d121df541..264ba1d95 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -274,7 +274,6 @@ func containerLXCCreate(s *state.State, args db.ContainerArgs) (container, error
 	// Create the container struct
 	c := &containerLXC{
 		state:        s,
-		db:           s.Node,
 		id:           args.Id,
 		name:         args.Name,
 		description:  args.Description,
@@ -442,7 +441,6 @@ func containerLXCLoad(s *state.State, args db.ContainerArgs) (container, error)
 	// Create the container struct
 	c := &containerLXC{
 		state:        s,
-		db:           s.Node,
 		id:           args.Id,
 		name:         args.Name,
 		description:  args.Description,
@@ -488,9 +486,9 @@ type containerLXC struct {
 	profiles        []string
 
 	// Cache
-	c        *lxc.Container
-	cConfig  bool
-	db       *db.Node
+	c       *lxc.Container
+	cConfig bool
+
 	state    *state.State
 	idmapset *idmap.IdmapSet
 
@@ -728,7 +726,7 @@ func findIdmap(state *state.State, cName string, isolatedStr string, configBase
 	idmapLock.Lock()
 	defer idmapLock.Unlock()
 
-	cs, err := state.Node.ContainersList(db.CTypeRegular)
+	cs, err := state.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -1618,7 +1616,7 @@ func (c *containerLXC) expandConfig() error {
 
 	// Apply all the profiles
 	for _, name := range c.profiles {
-		profileConfig, err := c.db.ProfileConfig(name)
+		profileConfig, err := c.state.Cluster.ProfileConfig(name)
 		if err != nil {
 			return err
 		}
@@ -1642,7 +1640,7 @@ func (c *containerLXC) expandDevices() error {
 
 	// Apply all the profiles
 	for _, p := range c.profiles {
-		profileDevices, err := c.db.Devices(p, true)
+		profileDevices, err := c.state.Cluster.Devices(p, true)
 		if err != nil {
 			return err
 		}
@@ -1770,7 +1768,7 @@ func (c *containerLXC) startCommon() (string, error) {
 		}
 
 		// Remove the volatile key from the DB
-		err = c.db.ContainerConfigRemove(c.id, "volatile.apply_quota")
+		err = c.state.Cluster.ContainerConfigRemove(c.id, "volatile.apply_quota")
 		if err != nil {
 			return "", err
 		}
@@ -2193,7 +2191,7 @@ func (c *containerLXC) startCommon() (string, error) {
 	}
 
 	// Update time container was last started
-	err = c.db.ContainerLastUsedUpdate(c.id, time.Now().UTC())
+	err = c.state.Cluster.ContainerLastUsedUpdate(c.id, time.Now().UTC())
 	if err != nil {
 		return "", fmt.Errorf("Error updating last used: %v", err)
 	}
@@ -2261,7 +2259,7 @@ func (c *containerLXC) Start(stateful bool) error {
 		os.RemoveAll(c.StatePath())
 		c.stateful = false
 
-		err = c.db.ContainerSetStateful(c.id, false)
+		err = c.state.Cluster.ContainerSetStateful(c.id, false)
 		if err != nil {
 			logger.Error("Failed starting container", ctxMap)
 			return err
@@ -2278,7 +2276,7 @@ func (c *containerLXC) Start(stateful bool) error {
 		}
 
 		c.stateful = false
-		err = c.db.ContainerSetStateful(c.id, false)
+		err = c.state.Cluster.ContainerSetStateful(c.id, false)
 		if err != nil {
 			return err
 		}
@@ -2371,7 +2369,7 @@ func (c *containerLXC) OnStart() error {
 		}
 
 		// Remove the volatile key from the DB
-		err := c.db.ContainerConfigRemove(c.id, key)
+		err := c.state.Cluster.ContainerConfigRemove(c.id, key)
 		if err != nil {
 			AADestroy(c)
 			if ourStart {
@@ -2425,7 +2423,7 @@ func (c *containerLXC) OnStart() error {
 	}
 
 	// Record current state
-	err = c.db.ContainerSetState(c.id, "RUNNING")
+	err = c.state.Cluster.ContainerSetState(c.id, "RUNNING")
 	if err != nil {
 		return err
 	}
@@ -2489,7 +2487,7 @@ func (c *containerLXC) Stop(stateful bool) error {
 		}
 
 		c.stateful = true
-		err = c.db.ContainerSetStateful(c.id, true)
+		err = c.state.Cluster.ContainerSetStateful(c.id, true)
 		if err != nil {
 			op.Done(err)
 			logger.Error("Failed stopping container", ctxMap)
@@ -2673,7 +2671,7 @@ func (c *containerLXC) OnStop(target string) error {
 		deviceTaskSchedulerTrigger("container", c.name, "stopped")
 
 		// Record current state
-		err = c.db.ContainerSetState(c.id, "STOPPED")
+		err = c.state.Cluster.ContainerSetState(c.id, "STOPPED")
 		if err != nil {
 			logger.Error("Failed to set container state", log.Ctx{"container": c.Name(), "err": err})
 		}
@@ -2867,7 +2865,7 @@ func (c *containerLXC) RenderState() (*api.ContainerState, error) {
 
 func (c *containerLXC) Snapshots() ([]container, error) {
 	// Get all the snapshots
-	snaps, err := c.db.ContainerGetSnapshots(c.name)
+	snaps, err := c.state.Cluster.ContainerGetSnapshots(c.name)
 	if err != nil {
 		return nil, err
 	}
@@ -3096,7 +3094,7 @@ func (c *containerLXC) Delete() error {
 	}
 
 	// Remove the database record
-	if err := c.db.ContainerRemove(c.Name()); err != nil {
+	if err := c.state.Cluster.ContainerRemove(c.Name()); err != nil {
 		logger.Error("Failed deleting container entry", log.Ctx{"name": c.Name(), "err": err})
 		return err
 	}
@@ -3189,7 +3187,7 @@ func (c *containerLXC) Rename(newName string) error {
 	}
 
 	// Rename the database entry
-	err = c.db.ContainerRename(oldName, newName)
+	err = c.state.Cluster.ContainerRename(oldName, newName)
 	if err != nil {
 		logger.Error("Failed renaming container", ctxMap)
 		return err
@@ -3205,7 +3203,7 @@ func (c *containerLXC) Rename(newName string) error {
 
 	if !c.IsSnapshot() {
 		// Rename all the snapshots
-		results, err := c.db.ContainerGetSnapshots(oldName)
+		results, err := c.state.Cluster.ContainerGetSnapshots(oldName)
 		if err != nil {
 			logger.Error("Failed renaming container", ctxMap)
 			return err
@@ -3215,7 +3213,7 @@ func (c *containerLXC) Rename(newName string) error {
 			// Rename the snapshot
 			baseSnapName := filepath.Base(sname)
 			newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName
-			err := c.db.ContainerRename(sname, newSnapshotName)
+			err := c.state.Cluster.ContainerRename(sname, newSnapshotName)
 			if err != nil {
 				logger.Error("Failed renaming container", ctxMap)
 				return err
@@ -3420,7 +3418,7 @@ func (c *containerLXC) Update(args db.ContainerArgs, userRequested bool) error {
 	}
 
 	// Validate the new profiles
-	profiles, err := c.db.Profiles()
+	profiles, err := c.state.Cluster.Profiles()
 	if err != nil {
 		return err
 	}
@@ -4314,7 +4312,7 @@ func (c *containerLXC) Update(args db.ContainerArgs, userRequested bool) error {
 	}
 
 	// Finally, apply the changes to the database
-	tx, err := c.db.Begin()
+	tx, err := c.state.Cluster.Begin()
 	if err != nil {
 		return err
 	}
@@ -6682,7 +6680,7 @@ func (c *containerLXC) fillNetworkDevice(name string, m types.Device) (types.Dev
 	}
 
 	updateKey := func(key string, value string) error {
-		tx, err := c.db.Begin()
+		tx, err := c.state.Cluster.Begin()
 		if err != nil {
 			return err
 		}
@@ -6716,7 +6714,7 @@ func (c *containerLXC) fillNetworkDevice(name string, m types.Device) (types.Dev
 			err = updateKey(configKey, volatileHwaddr)
 			if err != nil {
 				// Check if something else filled it in behind our back
-				value, err1 := c.db.ContainerConfigGet(c.id, configKey)
+				value, err1 := c.state.Cluster.ContainerConfigGet(c.id, configKey)
 				if err1 != nil || value == "" {
 					return nil, err
 				}
@@ -6746,7 +6744,7 @@ func (c *containerLXC) fillNetworkDevice(name string, m types.Device) (types.Dev
 			err = updateKey(configKey, volatileName)
 			if err != nil {
 				// Check if something else filled it in behind our back
-				value, err1 := c.db.ContainerConfigGet(c.id, configKey)
+				value, err1 := c.state.Cluster.ContainerConfigGet(c.id, configKey)
 				if err1 != nil || value == "" {
 					return nil, err
 				}
diff --git a/lxd/container_post.go b/lxd/container_post.go
index fa32e1c70..25e1fd04b 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -80,7 +80,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check that the name isn't already in use
-	id, _ := d.db.ContainerId(req.Name)
+	id, _ := d.cluster.ContainerId(req.Name)
 	if id > 0 {
 		return Conflict
 	}
diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index fec770b5c..94ca9cbd8 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -88,7 +88,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 
 	if req.Name == "" {
 		// come up with a name
-		i := d.db.ContainerNextSnapshot(name)
+		i := d.cluster.ContainerNextSnapshot(name)
 		req.Name = fmt.Sprintf("snap%d", i)
 	}
 
@@ -247,7 +247,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 	fullName := containerName + shared.SnapshotDelimiter + newName
 
 	// Check that the name isn't already in use
-	id, _ := d.db.ContainerId(fullName)
+	id, _ := d.cluster.ContainerId(fullName)
 	if id > 0 {
 		return Conflict
 	}
diff --git a/lxd/container_test.go b/lxd/container_test.go
index 9a0676159..2c546116e 100644
--- a/lxd/container_test.go
+++ b/lxd/container_test.go
@@ -41,7 +41,7 @@ func (suite *containerTestSuite) TestContainer_ProfilesDefault() {
 
 func (suite *containerTestSuite) TestContainer_ProfilesMulti() {
 	// Create an unprivileged profile
-	_, err := suite.d.db.ProfileCreate(
+	_, err := suite.d.cluster.ProfileCreate(
 		"unprivileged",
 		"unprivileged",
 		map[string]string{"security.privileged": "true"},
@@ -49,7 +49,7 @@ func (suite *containerTestSuite) TestContainer_ProfilesMulti() {
 
 	suite.Req.Nil(err, "Failed to create the unprivileged profile.")
 	defer func() {
-		suite.d.db.ProfileDelete("unprivileged")
+		suite.d.cluster.ProfileDelete("unprivileged")
 	}()
 
 	args := db.ContainerArgs{
diff --git a/lxd/containers.go b/lxd/containers.go
index e95d579ed..ea09e7802 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -106,7 +106,7 @@ func (slice containerAutostartList) Swap(i, j int) {
 
 func containersRestart(s *state.State) error {
 	// Get all the containers
-	result, err := s.Node.ContainersList(db.CTypeRegular)
+	result, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
@@ -153,13 +153,13 @@ func containersShutdown(s *state.State) error {
 	var wg sync.WaitGroup
 
 	// Get all the containers
-	results, err := s.Node.ContainersList(db.CTypeRegular)
+	results, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
 
 	// Reset all container states
-	err = s.Node.ContainersResetState()
+	err = s.Cluster.ContainersResetState()
 	if err != nil {
 		return err
 	}
@@ -207,7 +207,7 @@ func containerDeleteSnapshots(s *state.State, cname string) error {
 	logger.Debug("containerDeleteSnapshots",
 		log.Ctx{"container": cname})
 
-	results, err := s.Node.ContainerGetSnapshots(cname)
+	results, err := s.Cluster.ContainerGetSnapshots(cname)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 9ae37928b..29ac485ac 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -34,7 +34,7 @@ func containersGet(d *Daemon, r *http.Request) Response {
 }
 
 func doContainersGet(s *state.State, recursion bool) (interface{}, error) {
-	result, err := s.Node.ContainersList(db.CTypeRegular)
+	result, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 60e120453..666a3fbc2 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -32,7 +32,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 		if req.Source.Server != "" {
 			hash = req.Source.Alias
 		} else {
-			_, alias, err := d.db.ImageAliasGet(req.Source.Alias, true)
+			_, alias, err := d.cluster.ImageAliasGet(req.Source.Alias, true)
 			if err != nil {
 				return SmartError(err)
 			}
@@ -44,7 +44,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 			return BadRequest(fmt.Errorf("Property match is only supported for local images"))
 		}
 
-		hashes, err := d.db.ImagesGet(false)
+		hashes, err := d.cluster.ImagesGet(false)
 		if err != nil {
 			return SmartError(err)
 		}
@@ -52,7 +52,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 		var image *api.Image
 
 		for _, imageHash := range hashes {
-			_, img, err := d.db.ImageGet(imageHash, false, true)
+			_, img, err := d.cluster.ImageGet(imageHash, false, true)
 			if err != nil {
 				continue
 			}
@@ -108,7 +108,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 				return err
 			}
 		} else {
-			_, info, err = d.db.ImageGet(hash, false, false)
+			_, info, err = d.cluster.ImageGet(hash, false, false)
 			if err != nil {
 				return err
 			}
@@ -218,7 +218,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 	// If we don't have a valid pool yet, look through profiles
 	if storagePool == "" {
 		for _, pName := range req.Profiles {
-			_, p, err := d.db.ProfileGet(pName)
+			_, p, err := d.cluster.ProfileGet(pName)
 			if err != nil {
 				return SmartError(err)
 			}
@@ -292,7 +292,7 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 	 * point and just negotiate it over the migration control
 	 * socket. Anyway, it'll happen later :)
 	 */
-	_, _, err = d.db.ImageGet(req.Source.BaseImage, false, true)
+	_, _, err = d.cluster.ImageGet(req.Source.BaseImage, false, true)
 	if err != nil {
 		c, err = containerCreateAsEmpty(d, args)
 		if err != nil {
@@ -530,7 +530,7 @@ func containersPost(d *Daemon, r *http.Request) Response {
 	}
 
 	if req.Name == "" {
-		cs, err := d.db.ContainersList(db.CTypeRegular)
+		cs, err := d.cluster.ContainersList(db.CTypeRegular)
 		if err != nil {
 			return SmartError(err)
 		}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 183623690..f4f9caec2 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -539,7 +539,7 @@ func (d *Daemon) Ready() error {
 }
 
 func (d *Daemon) numRunningContainers() (int, error) {
-	results, err := d.db.ContainersList(db.CTypeRegular)
+	results, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return 0, err
 	}
@@ -576,10 +576,21 @@ func (d *Daemon) Stop() error {
 
 	shouldUnmount := false
 	if d.db != nil {
-		if n, err := d.numRunningContainers(); err != nil || n == 0 {
+		// It might be that database nodes are all down, in that case
+		// we don't want to wait too much.
+		//
+		// FIXME: it should be possible to provide a context or a
+		//        timeout for database queries.
+		ch := make(chan bool)
+		go func() {
+			n, err := d.numRunningContainers()
+			ch <- err != nil || n == 0
+		}()
+		select {
+		case shouldUnmount = <-ch:
+		case <-time.After(2 * time.Second):
 			shouldUnmount = true
 		}
-
 		logger.Infof("Closing the database")
 		trackError(d.db.Close())
 	}
@@ -683,10 +694,15 @@ func initializeDbObject(d *Daemon) (*db.Dump, error) {
 	for i, patch := range legacyPatches {
 		legacy[i] = &db.LegacyPatch{
 			Hook: func(node *sql.DB) error {
-				// FIXME: Attach the local db to the Daemon, since at
-				//        this stage we're not fully initialized, yet
-				//        some legacy patches expect to find it here.
+				// FIXME: Use the low-level *node* SQL db as backend for both the
+				//        db.Node and db.Cluster objects, since at this point we
+				//        haven't migrated the data to the cluster database yet.
+				cluster := d.cluster
+				defer func() {
+					d.cluster = cluster
+				}()
 				d.db = db.ForLegacyPatches(node)
+				d.cluster = db.ForLocalInspection(node)
 				return patch(d)
 			},
 		}
diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index 20a137e27..80635756d 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -237,14 +237,14 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 		return nil, err
 	}
 	if preferCached && interval > 0 && alias != fp {
-		cachedFingerprint, err := d.db.ImageSourceGetCachedFingerprint(server, protocol, alias)
+		cachedFingerprint, err := d.cluster.ImageSourceGetCachedFingerprint(server, protocol, alias)
 		if err == nil && cachedFingerprint != fp {
 			fp = cachedFingerprint
 		}
 	}
 
 	// Check if the image already exists (partial hash match)
-	_, imgInfo, err := d.db.ImageGet(fp, false, true)
+	_, imgInfo, err := d.cluster.ImageGet(fp, false, true)
 	if err == nil {
 		logger.Debug("Image already exists in the db", log.Ctx{"image": fp})
 		info = imgInfo
@@ -298,7 +298,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 		<-waitChannel
 
 		// Grab the database entry
-		_, imgInfo, err := d.db.ImageGet(fp, false, true)
+		_, imgInfo, err := d.cluster.ImageGet(fp, false, true)
 		if err != nil {
 			// Other download failed, lets try again
 			logger.Error("Other image download didn't succeed", log.Ctx{"image": fp})
@@ -519,7 +519,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 	}
 
 	// Create the database entry
-	err = d.db.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+	err = d.cluster.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 	if err != nil {
 		return nil, err
 	}
@@ -545,12 +545,12 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 
 	// Record the image source
 	if alias != fp {
-		id, _, err := d.db.ImageGet(fp, false, true)
+		id, _, err := d.cluster.ImageGet(fp, false, true)
 		if err != nil {
 			return nil, err
 		}
 
-		err = d.db.ImageSourceInsert(id, server, protocol, certificate, alias)
+		err = d.cluster.ImageSourceInsert(id, server, protocol, certificate, alias)
 		if err != nil {
 			return nil, err
 		}
@@ -566,7 +566,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, ce
 
 	// Mark the image as "cached" if downloading for a container
 	if forContainer {
-		err := d.db.ImageLastAccessInit(fp)
+		err := d.cluster.ImageLastAccessInit(fp)
 		if err != nil {
 			return nil, err
 		}
diff --git a/lxd/daemon_images_test.go b/lxd/daemon_images_test.go
index 7833cdfa2..68683dde7 100644
--- a/lxd/daemon_images_test.go
+++ b/lxd/daemon_images_test.go
@@ -19,11 +19,11 @@ type daemonImagesTestSuite struct {
 // newer image even if available, and just use the cached one.
 func (suite *daemonImagesTestSuite) TestUseCachedImagesIfAvailable() {
 	// Create an image with alias "test" and fingerprint "abcd".
-	err := suite.d.db.ImageInsert("abcd", "foo.xz", 1, false, true, "amd64", time.Now(), time.Now(), map[string]string{})
+	err := suite.d.cluster.ImageInsert("abcd", "foo.xz", 1, false, true, "amd64", time.Now(), time.Now(), map[string]string{})
 	suite.Req.Nil(err)
-	id, _, err := suite.d.db.ImageGet("abcd", false, true)
+	id, _, err := suite.d.cluster.ImageGet("abcd", false, true)
 	suite.Req.Nil(err)
-	err = suite.d.db.ImageSourceInsert(id, "img.srv", "simplestreams", "", "test")
+	err = suite.d.cluster.ImageSourceInsert(id, "img.srv", "simplestreams", "", "test")
 	suite.Req.Nil(err)
 
 	// Pretend we have already a non-expired entry for the remote
diff --git a/lxd/db/cluster/open.go b/lxd/db/cluster/open.go
index bcbb3a727..fbc678178 100644
--- a/lxd/db/cluster/open.go
+++ b/lxd/db/cluster/open.go
@@ -96,7 +96,8 @@ func EnsureSchema(db *sql.DB, address string) (bool, error) {
 	}
 
 	// When creating a database from scratch, insert an entry for node
-	// 1. This is needed for referential integrity with other tables.
+	// 1. This is needed for referential integrity with other tables. Also,
+	// create a default profile.
 	if initial == 0 {
 		stmt := `
 INSERT INTO nodes(id, name, address, schema, api_extensions) VALUES(1, 'none', '0.0.0.0', ?, ?)
@@ -106,6 +107,13 @@ INSERT INTO nodes(id, name, address, schema, api_extensions) VALUES(1, 'none', '
 			return false, err
 		}
 
+		stmt = `
+INSERT INTO profiles (name, description) VALUES ('default', 'Default LXD profile')
+`
+		_, err = db.Exec(stmt)
+		if err != nil {
+			return false, err
+		}
 	}
 
 	return true, err
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index b6f44ca28..b0865c245 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -78,6 +78,18 @@ INSERT INTO containers VALUES (1, 1, 'bionic', 1, 1, 0, ?, 0, ?, 'Bionic Beaver'
 INSERT INTO containers VALUES (2, 2, 'bionic', 2, 2, 1, ?, 1, ?, 'Ubuntu LTS')
 `, time.Now(), time.Now())
 	require.Error(t, err)
+
+	// Cascading delete
+	_, err = db.Exec("INSERT INTO containers_config VALUES (1, 1, 'thekey', 'thevalue')")
+	require.NoError(t, err)
+	_, err = db.Exec("DELETE FROM containers")
+	require.NoError(t, err)
+	result, err := db.Exec("DELETE FROM containers_config")
+	require.NoError(t, err)
+	n, err := result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), n) // The row was already deleted by the previous query
+
 }
 
 func TestUpdateFromV1_Network(t *testing.T) {
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index e51df08d3..337a432a1 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -40,13 +40,13 @@ const (
 	CTypeSnapshot ContainerType = 1
 )
 
-func (n *Node) ContainerRemove(name string) error {
-	id, err := n.ContainerId(name)
+func (c *Cluster) ContainerRemove(name string) error {
+	id, err := c.ContainerId(name)
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM containers WHERE id=?", id)
+	_, err = exec(c.db, "DELETE FROM containers WHERE id=?", id)
 	if err != nil {
 		return err
 	}
@@ -54,25 +54,25 @@ func (n *Node) ContainerRemove(name string) error {
 	return nil
 }
 
-func (n *Node) ContainerName(id int) (string, error) {
+func (c *Cluster) ContainerName(id int) (string, error) {
 	q := "SELECT name FROM containers WHERE id=?"
 	name := ""
 	arg1 := []interface{}{id}
 	arg2 := []interface{}{&name}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	return name, err
 }
 
-func (n *Node) ContainerId(name string) (int, error) {
+func (c *Cluster) ContainerId(name string) (int, error) {
 	q := "SELECT id FROM containers WHERE name=?"
 	id := -1
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&id}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	return id, err
 }
 
-func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
+func (c *Cluster) ContainerGet(name string) (ContainerArgs, error) {
 	var used *time.Time // Hold the db-returned time
 	description := sql.NullString{}
 
@@ -84,7 +84,7 @@ func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
 	q := "SELECT id, description, architecture, type, ephemeral, stateful, creation_date, last_use_date FROM containers WHERE name=?"
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&args.Id, &description, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate, &used}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return args, err
 	}
@@ -109,13 +109,13 @@ func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
 		args.LastUsedDate = time.Unix(0, 0).UTC()
 	}
 
-	config, err := n.ContainerConfig(args.Id)
+	config, err := c.ContainerConfig(args.Id)
 	if err != nil {
 		return args, err
 	}
 	args.Config = config
 
-	profiles, err := n.ContainerProfiles(args.Id)
+	profiles, err := c.ContainerProfiles(args.Id)
 	if err != nil {
 		return args, err
 	}
@@ -123,7 +123,7 @@ func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
 
 	/* get container_devices */
 	args.Devices = types.Devices{}
-	newdevs, err := n.Devices(name, false)
+	newdevs, err := c.Devices(name, false)
 	if err != nil {
 		return args, err
 	}
@@ -135,13 +135,13 @@ func (n *Node) ContainerGet(name string) (ContainerArgs, error) {
 	return args, nil
 }
 
-func (n *Node) ContainerCreate(args ContainerArgs) (int, error) {
-	_, err := n.ContainerId(args.Name)
+func (c *Cluster) ContainerCreate(args ContainerArgs) (int, error) {
+	_, err := c.ContainerId(args.Name)
 	if err == nil {
 		return 0, DbErrAlreadyDefined
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return 0, err
 	}
@@ -159,14 +159,14 @@ func (n *Node) ContainerCreate(args ContainerArgs) (int, error) {
 	args.CreationDate = time.Now().UTC()
 	args.LastUsedDate = time.Unix(0, 0).UTC()
 
-	str := fmt.Sprintf("INSERT INTO containers (name, architecture, type, ephemeral, creation_date, last_use_date, stateful) VALUES (?, ?, ?, ?, ?, ?, ?)")
+	str := fmt.Sprintf("INSERT INTO containers (node_id, name, architecture, type, ephemeral, creation_date, last_use_date, stateful) VALUES (?, ?, ?, ?, ?, ?, ?, ?)")
 	stmt, err := tx.Prepare(str)
 	if err != nil {
 		tx.Rollback()
 		return 0, err
 	}
 	defer stmt.Close()
-	result, err := stmt.Exec(args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), args.LastUsedDate.Unix(), statefulInt)
+	result, err := stmt.Exec(c.id, args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), args.LastUsedDate.Unix(), statefulInt)
 	if err != nil {
 		tx.Rollback()
 		return 0, err
@@ -238,27 +238,27 @@ func ContainerConfigInsert(tx *sql.Tx, id int, config map[string]string) error {
 	return nil
 }
 
-func (n *Node) ContainerConfigGet(id int, key string) (string, error) {
+func (c *Cluster) ContainerConfigGet(id int, key string) (string, error) {
 	q := "SELECT value FROM containers_config WHERE container_id=? AND key=?"
 	value := ""
 	arg1 := []interface{}{id, key}
 	arg2 := []interface{}{&value}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	return value, err
 }
 
-func (n *Node) ContainerConfigRemove(id int, name string) error {
-	_, err := exec(n.db, "DELETE FROM containers_config WHERE key=? AND container_id=?", name, id)
+func (c *Cluster) ContainerConfigRemove(id int, name string) error {
+	_, err := exec(c.db, "DELETE FROM containers_config WHERE key=? AND container_id=?", name, id)
 	return err
 }
 
-func (n *Node) ContainerSetStateful(id int, stateful bool) error {
+func (c *Cluster) ContainerSetStateful(id int, stateful bool) error {
 	statefulInt := 0
 	if stateful {
 		statefulInt = 1
 	}
 
-	_, err := exec(n.db, "UPDATE containers SET stateful=? WHERE id=?", statefulInt, id)
+	_, err := exec(c.db, "UPDATE containers SET stateful=? WHERE id=?", statefulInt, id)
 	return err
 }
 
@@ -285,7 +285,7 @@ func ContainerProfilesInsert(tx *sql.Tx, id int, profiles []string) error {
 }
 
 // Get a list of profiles for a given container id.
-func (n *Node) ContainerProfiles(containerId int) ([]string, error) {
+func (c *Cluster) ContainerProfiles(containerId int) ([]string, error) {
 	var name string
 	var profiles []string
 
@@ -297,7 +297,7 @@ func (n *Node) ContainerProfiles(containerId int) ([]string, error) {
 	inargs := []interface{}{containerId}
 	outfmt := []interface{}{name}
 
-	results, err := queryScan(n.db, query, inargs, outfmt)
+	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
 		return nil, err
 	}
@@ -312,7 +312,7 @@ func (n *Node) ContainerProfiles(containerId int) ([]string, error) {
 }
 
 // ContainerConfig gets the container configuration map from the DB
-func (n *Node) ContainerConfig(containerId int) (map[string]string, error) {
+func (c *Cluster) ContainerConfig(containerId int) (map[string]string, error) {
 	var key, value string
 	q := `SELECT key, value FROM containers_config WHERE container_id=?`
 
@@ -320,7 +320,7 @@ func (n *Node) ContainerConfig(containerId int) (map[string]string, error) {
 	outfmt := []interface{}{key, value}
 
 	// Results is already a slice here, not db Rows anymore.
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return nil, err //SmartError will wrap this and make "not found" errors pretty
 	}
@@ -337,12 +337,12 @@ func (n *Node) ContainerConfig(containerId int) (map[string]string, error) {
 	return config, nil
 }
 
-func (n *Node) ContainersList(cType ContainerType) ([]string, error) {
+func (c *Cluster) ContainersList(cType ContainerType) ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM containers WHERE type=? ORDER BY name")
 	inargs := []interface{}{cType}
 	var container string
 	outfmt := []interface{}{container}
-	result, err := queryScan(n.db, q, inargs, outfmt)
+	result, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return nil, err
 	}
@@ -355,14 +355,14 @@ func (n *Node) ContainersList(cType ContainerType) ([]string, error) {
 	return ret, nil
 }
 
-func (n *Node) ContainersResetState() error {
+func (c *Cluster) ContainersResetState() error {
 	// Reset all container states
-	_, err := exec(n.db, "DELETE FROM containers_config WHERE key='volatile.last_state.power'")
+	_, err := exec(c.db, "DELETE FROM containers_config WHERE key='volatile.last_state.power'")
 	return err
 }
 
-func (n *Node) ContainerSetState(id int, state string) error {
-	tx, err := begin(n.db)
+func (c *Cluster) ContainerSetState(id int, state string) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -398,8 +398,8 @@ func (n *Node) ContainerSetState(id int, state string) error {
 	return TxCommit(tx)
 }
 
-func (n *Node) ContainerRename(oldName string, newName string) error {
-	tx, err := begin(n.db)
+func (c *Cluster) ContainerRename(oldName string, newName string) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -446,13 +446,13 @@ func ContainerUpdate(tx *sql.Tx, id int, description string, architecture int, e
 	return nil
 }
 
-func (n *Node) ContainerLastUsedUpdate(id int, date time.Time) error {
+func (c *Cluster) ContainerLastUsedUpdate(id int, date time.Time) error {
 	stmt := `UPDATE containers SET last_use_date=? WHERE id=?`
-	_, err := exec(n.db, stmt, date, id)
+	_, err := exec(c.db, stmt, date, id)
 	return err
 }
 
-func (n *Node) ContainerGetSnapshots(name string) ([]string, error) {
+func (c *Cluster) ContainerGetSnapshots(name string) ([]string, error) {
 	result := []string{}
 
 	regexp := name + shared.SnapshotDelimiter
@@ -460,7 +460,7 @@ func (n *Node) ContainerGetSnapshots(name string) ([]string, error) {
 	q := "SELECT name FROM containers WHERE type=? AND SUBSTR(name,1,?)=?"
 	inargs := []interface{}{CTypeSnapshot, length, regexp}
 	outfmt := []interface{}{name}
-	dbResults, err := queryScan(n.db, q, inargs, outfmt)
+	dbResults, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return result, err
 	}
@@ -476,14 +476,14 @@ func (n *Node) ContainerGetSnapshots(name string) ([]string, error) {
  * Note, the code below doesn't deal with snapshots of snapshots.
  * To do that, we'll need to weed out based on # slashes in names
  */
-func (n *Node) ContainerNextSnapshot(name string) int {
+func (c *Cluster) ContainerNextSnapshot(name string) int {
 	base := name + shared.SnapshotDelimiter + "snap"
 	length := len(base)
 	q := fmt.Sprintf("SELECT name FROM containers WHERE type=? AND SUBSTR(name,1,?)=?")
 	var numstr string
 	inargs := []interface{}{CTypeSnapshot, length, base}
 	outfmt := []interface{}{numstr}
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return 0
 	}
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 76e20ed10..0dd5e6c7e 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -84,10 +84,6 @@ func OpenNode(dir string, fresh func(*Node) error, legacyPatches map[int]*Legacy
 	}
 
 	if initial == 0 {
-		err := node.ProfileCreateDefault()
-		if err != nil {
-			return nil, nil, err
-		}
 		if fresh != nil {
 			err := fresh(node)
 			if err != nil {
@@ -201,6 +197,13 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 	return cluster, nil
 }
 
+// ForLocalInspection is an aid for the hack in initializeDbObject, which
+// sets the db-related Daemon attributes upfront, to be backward compatible
+// with the legacy patches that need to interact with the database.
+func ForLocalInspection(db *sql.DB) *Cluster {
+	return &Cluster{db: db}
+}
+
 // Transaction creates a new ClusterTx object and transactionally executes the
 // cluster database interactions invoked by the given function. If the function
 // returns no error, all database changes are committed to the cluster database
@@ -252,6 +255,13 @@ func (c *Cluster) DB() *sql.DB {
 	return c.db
 }
 
+// Begin a new transaction against the cluster database.
+//
+// FIXME: legacy method.
+func (c *Cluster) Begin() (*sql.Tx, error) {
+	return begin(c.db)
+}
+
 // UpdateSchemasDotGo updates the schema.go files in the local/ and cluster/
 // sub-packages.
 func UpdateSchemasDotGo() error {
diff --git a/lxd/db/db_internal_test.go b/lxd/db/db_internal_test.go
index 9daf779a9..4ea4bd355 100644
--- a/lxd/db/db_internal_test.go
+++ b/lxd/db/db_internal_test.go
@@ -3,16 +3,11 @@ package db
 import (
 	"database/sql"
 	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/suite"
 
-	"github.com/lxc/lxd/lxd/db/node"
-	"github.com/lxc/lxd/lxd/db/query"
 	"github.com/lxc/lxd/lxd/types"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
@@ -20,7 +15,7 @@ import (
 )
 
 const DB_FIXTURES string = `
-    INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1);
+    INSERT INTO containers (node_id, name, architecture, type) VALUES (1, 'thename', 1, 1);
     INSERT INTO profiles (name) VALUES ('theprofile');
     INSERT INTO containers_profiles (container_id, profile_id) VALUES (1, 2);
     INSERT INTO containers_config (container_id, key, value) VALUES (1, 'thekey', 'thevalue');
@@ -37,23 +32,23 @@ const DB_FIXTURES string = `
 type dbTestSuite struct {
 	suite.Suite
 
-	dir string
-	db  *Node
+	dir     string
+	db      *Cluster
+	cleanup func()
 }
 
 func (s *dbTestSuite) SetupTest() {
-	s.db = s.CreateTestDb()
+	s.db, s.cleanup = s.CreateTestDb()
 	_, err := s.db.DB().Exec(DB_FIXTURES)
 	s.Nil(err)
 }
 
 func (s *dbTestSuite) TearDownTest() {
-	s.db.DB().Close()
-	os.RemoveAll(s.dir)
+	s.cleanup()
 }
 
 // Initialize a test in-memory DB.
-func (s *dbTestSuite) CreateTestDb() *Node {
+func (s *dbTestSuite) CreateTestDb() (*Cluster, func()) {
 	var err error
 
 	// Setup logging if main() hasn't been called/when testing
@@ -62,12 +57,8 @@ func (s *dbTestSuite) CreateTestDb() *Node {
 		s.Nil(err)
 	}
 
-	s.dir, err = ioutil.TempDir("", "lxd-db-test")
-	s.Nil(err)
-
-	db, _, err := OpenNode(s.dir, nil, nil)
-	s.Nil(err)
-	return db
+	db, cleanup := NewTestCluster(s.T())
+	return db, cleanup
 }
 
 func TestDBTestSuite(t *testing.T) {
@@ -169,155 +160,6 @@ func (s *dbTestSuite) Test_deleting_an_image_cascades_on_related_tables() {
 	s.Equal(count, 0, "Deleting an image didn't delete the related images_properties!")
 }
 
-func (s *dbTestSuite) Test_running_UpdateFromV6_adds_on_delete_cascade() {
-	// Upgrading the database schema with updateFromV6 adds ON DELETE CASCADE
-	// to sqlite tables that require it, and conserve the data.
-
-	var err error
-	var count int
-
-	db := s.CreateTestDb()
-	defer db.DB().Close()
-
-	statements := `
-CREATE TABLE IF NOT EXISTS containers (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    architecture INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    power_state INTEGER NOT NULL DEFAULT 0,
-    ephemeral INTEGER NOT NULL DEFAULT 0,
-    UNIQUE (name)
-);
-CREATE TABLE IF NOT EXISTS containers_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (container_id) REFERENCES containers (id),
-    UNIQUE (container_id, key)
-);
-
-INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1);
-INSERT INTO containers_config (container_id, key, value) VALUES (1, 'thekey', 'thevalue');`
-
-	_, err = db.DB().Exec(statements)
-	s.Nil(err)
-
-	// Run the upgrade from V6 code
-	err = query.Transaction(db.DB(), node.UpdateFromV16)
-	s.Nil(err)
-
-	// Make sure the inserted data is still there.
-	statements = `SELECT count(*) FROM containers_config;`
-	err = db.DB().QueryRow(statements).Scan(&count)
-	s.Nil(err)
-	s.Equal(count, 1, "There should be exactly one entry in containers_config!")
-
-	// Drop the container.
-	statements = `DELETE FROM containers WHERE name = 'thename';`
-
-	_, err = db.DB().Exec(statements)
-	s.Nil(err)
-
-	// Make sure there are 0 container_profiles entries left.
-	statements = `SELECT count(*) FROM containers_profiles;`
-	err = db.DB().QueryRow(statements).Scan(&count)
-	s.Nil(err)
-	s.Equal(count, 0, "Deleting a container didn't delete the profile association!")
-}
-
-func (s *dbTestSuite) Test_run_database_upgrades_with_some_foreign_keys_inconsistencies() {
-	var db *sql.DB
-	var err error
-	var count int
-	var statements string
-
-	dir, err := ioutil.TempDir("", "lxd-db-test-")
-	s.Nil(err)
-	defer os.RemoveAll(dir)
-	path := filepath.Join(dir, "lxd.db")
-	db, err = sql.Open("sqlite3", path)
-	defer db.Close()
-	s.Nil(err)
-
-	// This schema is a part of schema rev 1.
-	statements = `
-CREATE TABLE containers (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    architecture INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    UNIQUE (name)
-);
-CREATE TABLE containers_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (container_id) REFERENCES containers (id),
-    UNIQUE (container_id, key)
-);
-CREATE TABLE schema (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    version INTEGER NOT NULL,
-    updated_at DATETIME NOT NULL,
-    UNIQUE (version)
-);
-CREATE TABLE images (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    fingerprint VARCHAR(255) NOT NULL,
-    filename VARCHAR(255) NOT NULL,
-    size INTEGER NOT NULL,
-    public INTEGER NOT NULL DEFAULT 0,
-    architecture INTEGER NOT NULL,
-    creation_date DATETIME,
-    expiry_date DATETIME,
-    upload_date DATETIME NOT NULL,
-    UNIQUE (fingerprint)
-);
-CREATE TABLE images_properties (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    image_id INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (image_id) REFERENCES images (id)
-);
-CREATE TABLE certificates (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    fingerprint VARCHAR(255) NOT NULL,
-    type INTEGER NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    certificate TEXT NOT NULL,
-    UNIQUE (fingerprint)
-);
-INSERT INTO schema (version, updated_at) values (1, "now");
-INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1);
-INSERT INTO containers_config (container_id, key, value) VALUES (1, 'thekey', 'thevalue');`
-
-	_, err = db.Exec(statements)
-	s.Nil(err)
-
-	// Now that we have a consistent schema, let's remove the container entry
-	// *without* the ON DELETE CASCADE in place.
-	statements = `DELETE FROM containers;`
-	_, err = db.Exec(statements)
-	s.Nil(err)
-
-	// The "foreign key" on containers_config now points to nothing.
-	// Let's run the schema upgrades.
-	schema := node.Schema()
-	_, err = schema.Ensure(db)
-	s.Nil(err)
-
-	// Make sure there are 0 containers_config entries left.
-	statements = `SELECT count(*) FROM containers_config;`
-	err = db.QueryRow(statements).Scan(&count)
-	s.Nil(err)
-	s.Equal(count, 0, "updateDb did not delete orphaned child entries after adding ON DELETE CASCADE!")
-}
-
 func (s *dbTestSuite) Test_ImageGet_finds_image_for_fingerprint() {
 	var err error
 	var result *api.Image
diff --git a/lxd/db/devices.go b/lxd/db/devices.go
index f246a3b17..5953aff14 100644
--- a/lxd/db/devices.go
+++ b/lxd/db/devices.go
@@ -134,7 +134,7 @@ func dbDeviceConfig(db *sql.DB, id int, isprofile bool) (types.Device, error) {
 	return newdev, nil
 }
 
-func (n *Node) Devices(qName string, isprofile bool) (types.Devices, error) {
+func (c *Cluster) Devices(qName string, isprofile bool) (types.Devices, error) {
 	var q string
 	if isprofile {
 		q = `SELECT profiles_devices.id, profiles_devices.name, profiles_devices.type
@@ -151,7 +151,7 @@ func (n *Node) Devices(qName string, isprofile bool) (types.Devices, error) {
 	var name, stype string
 	inargs := []interface{}{qName}
 	outfmt := []interface{}{id, name, dtype}
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return nil, err
 	}
@@ -164,7 +164,7 @@ func (n *Node) Devices(qName string, isprofile bool) (types.Devices, error) {
 		if err != nil {
 			return nil, err
 		}
-		newdev, err := dbDeviceConfig(n.db, id, isprofile)
+		newdev, err := dbDeviceConfig(c.db, id, isprofile)
 		if err != nil {
 			return nil, err
 		}
diff --git a/lxd/db/images.go b/lxd/db/images.go
index 244d5777d..e69e8acb7 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -17,7 +17,7 @@ var ImageSourceProtocol = map[int]string{
 	2: "simplestreams",
 }
 
-func (n *Node) ImagesGet(public bool) ([]string, error) {
+func (c *Cluster) ImagesGet(public bool) ([]string, error) {
 	q := "SELECT fingerprint FROM images"
 	if public == true {
 		q = "SELECT fingerprint FROM images WHERE public=1"
@@ -26,7 +26,7 @@ func (n *Node) ImagesGet(public bool) ([]string, error) {
 	var fp string
 	inargs := []interface{}{}
 	outfmt := []interface{}{fp}
-	dbResults, err := queryScan(n.db, q, inargs, outfmt)
+	dbResults, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return []string{}, err
 	}
@@ -39,7 +39,7 @@ func (n *Node) ImagesGet(public bool) ([]string, error) {
 	return results, nil
 }
 
-func (n *Node) ImagesGetExpired(expiry int64) ([]string, error) {
+func (c *Cluster) ImagesGetExpired(expiry int64) ([]string, error) {
 	q := `SELECT fingerprint, last_use_date, upload_date FROM images WHERE cached=1`
 
 	var fpStr string
@@ -48,7 +48,7 @@ func (n *Node) ImagesGetExpired(expiry int64) ([]string, error) {
 
 	inargs := []interface{}{}
 	outfmt := []interface{}{fpStr, useStr, uploadStr}
-	dbResults, err := queryScan(n.db, q, inargs, outfmt)
+	dbResults, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return []string{}, err
 	}
@@ -79,7 +79,7 @@ func (n *Node) ImagesGetExpired(expiry int64) ([]string, error) {
 	return results, nil
 }
 
-func (n *Node) ImageSourceInsert(imageId int, server string, protocol string, certificate string, alias string) error {
+func (c *Cluster) ImageSourceInsert(imageId int, server string, protocol string, certificate string, alias string) error {
 	stmt := `INSERT INTO images_source (image_id, server, protocol, certificate, alias) values (?, ?, ?, ?, ?)`
 
 	protocolInt := -1
@@ -93,11 +93,11 @@ func (n *Node) ImageSourceInsert(imageId int, server string, protocol string, ce
 		return fmt.Errorf("Invalid protocol: %s", protocol)
 	}
 
-	_, err := exec(n.db, stmt, imageId, server, protocolInt, certificate, alias)
+	_, err := exec(c.db, stmt, imageId, server, protocolInt, certificate, alias)
 	return err
 }
 
-func (n *Node) ImageSourceGet(imageId int) (int, api.ImageSource, error) {
+func (c *Cluster) ImageSourceGet(imageId int) (int, api.ImageSource, error) {
 	q := `SELECT id, server, protocol, certificate, alias FROM images_source WHERE image_id=?`
 
 	id := 0
@@ -106,7 +106,7 @@ func (n *Node) ImageSourceGet(imageId int) (int, api.ImageSource, error) {
 
 	arg1 := []interface{}{imageId}
 	arg2 := []interface{}{&id, &result.Server, &protocolInt, &result.Certificate, &result.Alias}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return -1, api.ImageSource{}, NoSuchObjectError
@@ -129,7 +129,7 @@ func (n *Node) ImageSourceGet(imageId int) (int, api.ImageSource, error) {
 // Try to find a source entry of a locally cached image that matches
 // the given remote details (server, protocol and alias). Return the
 // fingerprint linked to the matching entry, if any.
-func (n *Node) ImageSourceGetCachedFingerprint(server string, protocol string, alias string) (string, error) {
+func (c *Cluster) ImageSourceGetCachedFingerprint(server string, protocol string, alias string) (string, error) {
 	protocolInt := -1
 	for protoInt, protoString := range ImageSourceProtocol {
 		if protoString == protocol {
@@ -152,7 +152,7 @@ func (n *Node) ImageSourceGetCachedFingerprint(server string, protocol string, a
 
 	arg1 := []interface{}{server, protocolInt, alias}
 	arg2 := []interface{}{&fingerprint}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return "", NoSuchObjectError
@@ -165,13 +165,13 @@ func (n *Node) ImageSourceGetCachedFingerprint(server string, protocol string, a
 }
 
 // Whether an image with the given fingerprint exists.
-func (n *Node) ImageExists(fingerprint string) (bool, error) {
+func (c *Cluster) ImageExists(fingerprint string) (bool, error) {
 	var exists bool
 	var err error
 	query := "SELECT COUNT(*) > 0 FROM images WHERE fingerprint=?"
 	inargs := []interface{}{fingerprint}
 	outargs := []interface{}{&exists}
-	err = dbQueryRowScan(n.db, query, inargs, outargs)
+	err = dbQueryRowScan(c.db, query, inargs, outargs)
 	return exists, err
 }
 
@@ -180,7 +180,7 @@ func (n *Node) ImageExists(fingerprint string) (bool, error) {
 // pass a shortform and will get the full fingerprint.
 // There can never be more than one image with a given fingerprint, as it is
 // enforced by a UNIQUE constraint in the schema.
-func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (int, *api.Image, error) {
+func (c *Cluster) ImageGet(fingerprint string, public bool, strictMatching bool) (int, *api.Image, error) {
 	var err error
 	var create, expire, used, upload *time.Time // These hold the db-returned times
 
@@ -212,7 +212,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 		query += " AND public=1"
 	}
 
-	err = dbQueryRowScan(n.db, query, inargs, outfmt)
+	err = dbQueryRowScan(c.db, query, inargs, outfmt)
 	if err != nil {
 		return -1, nil, err // Likely: there are no rows for this fingerprint
 	}
@@ -223,7 +223,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 		count := 0
 		outfmt := []interface{}{&count}
 
-		err = dbQueryRowScan(n.db, query, inargs, outfmt)
+		err = dbQueryRowScan(c.db, query, inargs, outfmt)
 		if err != nil {
 			return -1, nil, err
 		}
@@ -262,7 +262,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 	var key, value, name, desc string
 	inargs = []interface{}{id}
 	outfmt = []interface{}{key, value}
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -280,7 +280,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 	q = "SELECT name, description FROM images_aliases WHERE image_id=?"
 	inargs = []interface{}{id}
 	outfmt = []interface{}{name, desc}
-	results, err = queryScan(n.db, q, inargs, outfmt)
+	results, err = queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -295,7 +295,7 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 
 	image.Aliases = aliases
 
-	_, source, err := n.ImageSourceGet(id)
+	_, source, err := c.ImageSourceGet(id)
 	if err == nil {
 		image.UpdateSource = &source
 	}
@@ -303,8 +303,8 @@ func (n *Node) ImageGet(fingerprint string, public bool, strictMatching bool) (i
 	return id, &image, nil
 }
 
-func (n *Node) ImageDelete(id int) error {
-	_, err := exec(n.db, "DELETE FROM images WHERE id=?", id)
+func (c *Cluster) ImageDelete(id int) error {
+	_, err := exec(c.db, "DELETE FROM images WHERE id=?", id)
 	if err != nil {
 		return err
 	}
@@ -312,12 +312,12 @@ func (n *Node) ImageDelete(id int) error {
 	return nil
 }
 
-func (n *Node) ImageAliasesGet() ([]string, error) {
+func (c *Cluster) ImageAliasesGet() ([]string, error) {
 	q := "SELECT name FROM images_aliases"
 	var name string
 	inargs := []interface{}{}
 	outfmt := []interface{}{name}
-	results, err := queryScan(n.db, q, inargs, outfmt)
+	results, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return nil, err
 	}
@@ -328,7 +328,7 @@ func (n *Node) ImageAliasesGet() ([]string, error) {
 	return names, nil
 }
 
-func (n *Node) ImageAliasGet(name string, isTrustedClient bool) (int, api.ImageAliasesEntry, error) {
+func (c *Cluster) ImageAliasGet(name string, isTrustedClient bool) (int, api.ImageAliasesEntry, error) {
 	q := `SELECT images_aliases.id, images.fingerprint, images_aliases.description
 			 FROM images_aliases
 			 INNER JOIN images
@@ -344,7 +344,7 @@ func (n *Node) ImageAliasGet(name string, isTrustedClient bool) (int, api.ImageA
 
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&id, &fingerprint, &description}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return -1, entry, NoSuchObjectError
@@ -360,53 +360,53 @@ func (n *Node) ImageAliasGet(name string, isTrustedClient bool) (int, api.ImageA
 	return id, entry, nil
 }
 
-func (n *Node) ImageAliasRename(id int, name string) error {
-	_, err := exec(n.db, "UPDATE images_aliases SET name=? WHERE id=?", name, id)
+func (c *Cluster) ImageAliasRename(id int, name string) error {
+	_, err := exec(c.db, "UPDATE images_aliases SET name=? WHERE id=?", name, id)
 	return err
 }
 
-func (n *Node) ImageAliasDelete(name string) error {
-	_, err := exec(n.db, "DELETE FROM images_aliases WHERE name=?", name)
+func (c *Cluster) ImageAliasDelete(name string) error {
+	_, err := exec(c.db, "DELETE FROM images_aliases WHERE name=?", name)
 	return err
 }
 
-func (n *Node) ImageAliasesMove(source int, destination int) error {
-	_, err := exec(n.db, "UPDATE images_aliases SET image_id=? WHERE image_id=?", destination, source)
+func (c *Cluster) ImageAliasesMove(source int, destination int) error {
+	_, err := exec(c.db, "UPDATE images_aliases SET image_id=? WHERE image_id=?", destination, source)
 	return err
 }
 
 // Insert an alias into the database.
-func (n *Node) ImageAliasAdd(name string, imageID int, desc string) error {
+func (c *Cluster) ImageAliasAdd(name string, imageID int, desc string) error {
 	stmt := `INSERT INTO images_aliases (name, image_id, description) values (?, ?, ?)`
-	_, err := exec(n.db, stmt, name, imageID, desc)
+	_, err := exec(c.db, stmt, name, imageID, desc)
 	return err
 }
 
-func (n *Node) ImageAliasUpdate(id int, imageID int, desc string) error {
+func (c *Cluster) ImageAliasUpdate(id int, imageID int, desc string) error {
 	stmt := `UPDATE images_aliases SET image_id=?, description=? WHERE id=?`
-	_, err := exec(n.db, stmt, imageID, desc, id)
+	_, err := exec(c.db, stmt, imageID, desc, id)
 	return err
 }
 
-func (n *Node) ImageLastAccessUpdate(fingerprint string, date time.Time) error {
+func (c *Cluster) ImageLastAccessUpdate(fingerprint string, date time.Time) error {
 	stmt := `UPDATE images SET last_use_date=? WHERE fingerprint=?`
-	_, err := exec(n.db, stmt, date, fingerprint)
+	_, err := exec(c.db, stmt, date, fingerprint)
 	return err
 }
 
-func (n *Node) ImageLastAccessInit(fingerprint string) error {
+func (c *Cluster) ImageLastAccessInit(fingerprint string) error {
 	stmt := `UPDATE images SET cached=1, last_use_date=strftime("%s") WHERE fingerprint=?`
-	_, err := exec(n.db, stmt, fingerprint)
+	_, err := exec(c.db, stmt, fingerprint)
 	return err
 }
 
-func (n *Node) ImageUpdate(id int, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string) error {
+func (c *Cluster) ImageUpdate(id int, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string) error {
 	arch, err := osarch.ArchitectureId(architecture)
 	if err != nil {
 		arch = 0
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -461,13 +461,13 @@ func (n *Node) ImageUpdate(id int, fname string, sz int64, public bool, autoUpda
 	return nil
 }
 
-func (n *Node) ImageInsert(fp string, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string) error {
+func (c *Cluster) ImageInsert(fp string, fname string, sz int64, public bool, autoUpdate bool, architecture string, createdAt time.Time, expiresAt time.Time, properties map[string]string) error {
 	arch, err := osarch.ArchitectureId(architecture)
 	if err != nil {
 		arch = 0
 	}
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -495,14 +495,14 @@ func (n *Node) ImageInsert(fp string, fname string, sz int64, public bool, autoU
 		return err
 	}
 
-	if len(properties) > 0 {
-		id64, err := result.LastInsertId()
-		if err != nil {
-			tx.Rollback()
-			return err
-		}
-		id := int(id64)
+	id64, err := result.LastInsertId()
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	id := int(id64)
 
+	if len(properties) > 0 {
 		pstmt, err := tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, 0, ?, ?)`)
 		if err != nil {
 			tx.Rollback()
@@ -522,6 +522,12 @@ func (n *Node) ImageInsert(fp string, fname string, sz int64, public bool, autoU
 
 	}
 
+	_, err = tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", id, c.id)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
 	if err := TxCommit(tx); err != nil {
 		return err
 	}
@@ -550,7 +556,7 @@ func (c *Cluster) ImageGetPools(imageFingerprint string) ([]int64, error) {
 }
 
 // Get the names of all storage pools on which a given image exists.
-func (n *Node) ImageGetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
+func (c *Cluster) ImageGetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
 	var poolName string
 	query := "SELECT name FROM storage_pools WHERE id=?"
 
@@ -559,7 +565,7 @@ func (n *Node) ImageGetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
 		inargs := []interface{}{poolID}
 		outargs := []interface{}{poolName}
 
-		result, err := queryScan(n.db, query, inargs, outargs)
+		result, err := queryScan(c.db, query, inargs, outargs)
 		if err != nil {
 			return []string{}, err
 		}
@@ -573,7 +579,7 @@ func (n *Node) ImageGetPoolNamesFromIDs(poolIDs []int64) ([]string, error) {
 }
 
 // ImageUploadedAt updates the upload_date column and an image row.
-func (n *Node) ImageUploadedAt(id int, uploadedAt time.Time) error {
-	_, err := exec(n.db, "UPDATE images SET upload_date=? WHERE id=?", uploadedAt, id)
+func (c *Cluster) ImageUploadedAt(id int, uploadedAt time.Time) error {
+	_, err := exec(c.db, "UPDATE images SET upload_date=? WHERE id=?", uploadedAt, id)
 	return err
 }
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 9744bb8a3..4a424bf7e 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -65,6 +65,13 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 		return errors.Wrap(err, "failed to start cluster database transaction")
 	}
 
+	// Delete the default profile in the cluster database, which always
+	// gets created no matter what.
+	_, err = tx.Exec("DELETE FROM profiles WHERE id=1")
+	if err != nil {
+		return errors.Wrap(err, "failed to delete default profile")
+	}
+
 	for _, table := range preClusteringTables {
 		for i, row := range dump.Data[table] {
 			for i, element := range row {
@@ -88,6 +95,8 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 			}
 
 			switch table {
+			case "containers":
+				fallthrough
 			case "networks_config":
 				appendNodeID()
 			case "storage_pools_config":
@@ -117,6 +126,28 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 			if n != 1 {
 				return fmt.Errorf("could not insert %d into %s", i, table)
 			}
+
+			// Also insert the image ID -> node ID association.
+			if table == "images" {
+				stmt := "INSERT INTO images_nodes(image_id, node_id) VALUES(?, 1)"
+				var imageID int64
+				for i, column := range columns {
+					if column == "id" {
+						var ok bool
+						if imageID, ok = row[i].(int64); !ok {
+							return fmt.Errorf("image has invalid ID")
+						}
+						break
+					}
+				}
+				if imageID == 0 {
+					return fmt.Errorf("image has invalid ID")
+				}
+				_, err := tx.Exec(stmt, imageID)
+				if err != nil {
+					return errors.Wrapf(err, "failed to associate image to node")
+				}
+			}
 		}
 	}
 
@@ -137,8 +168,18 @@ type Dump struct {
 var preClusteringTables = []string{
 	"certificates",
 	"config",
+	"containers",
+	"containers_config",
+	"containers_devices",
+	"containers_devices_config",
+	"containers_profiles",
+	"images",
+	"images_aliases",
+	"images_properties",
+	"images_source",
 	"networks",
 	"networks_config",
+	"profiles",
 	"storage_pools",
 	"storage_pools_config",
 	"storage_volumes",
diff --git a/lxd/db/migration_test.go b/lxd/db/migration_test.go
index 8d2b392a7..720a9dfb1 100644
--- a/lxd/db/migration_test.go
+++ b/lxd/db/migration_test.go
@@ -98,6 +98,7 @@ func newPreClusteringTx(t *testing.T) *sql.Tx {
 		preClusteringNodeSchema,
 		"INSERT INTO certificates VALUES (1, 'abcd:efgh', 1, 'foo', 'FOO')",
 		"INSERT INTO config VALUES(1, 'core.https_address', '1.2.3.4:666')",
+		"INSERT INTO images VALUES(1, 'abc', 'x.gz', 16, 0, 1, 0, 0, strftime('%d-%m-%Y', 'now'), 0, 0, 0)",
 		"INSERT INTO networks VALUES(1, 'lxcbr0', 'LXD bridge')",
 		"INSERT INTO networks_config VALUES(1, 1, 'ipv4.nat', 'true')",
 		"INSERT INTO storage_pools VALUES (1, 'default', 'dir', '')",
diff --git a/lxd/db/node/schema.go b/lxd/db/node/schema.go
index fd88a6ada..fcd18b658 100644
--- a/lxd/db/node/schema.go
+++ b/lxd/db/node/schema.go
@@ -12,103 +12,12 @@ CREATE TABLE config (
     value TEXT,
     UNIQUE (key)
 );
-CREATE TABLE "containers" (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    architecture INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    ephemeral INTEGER NOT NULL DEFAULT 0,
-    creation_date DATETIME NOT NULL DEFAULT 0,
-    stateful INTEGER NOT NULL DEFAULT 0,
-    last_use_date DATETIME,
-    description TEXT,
-    UNIQUE (name)
-);
-CREATE TABLE containers_config (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
-    UNIQUE (container_id, key)
-);
-CREATE TABLE containers_devices (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    type INTEGER NOT NULL default 0,
-    FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE,
-    UNIQUE (container_id, name)
-);
-CREATE TABLE containers_devices_config (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    container_device_id INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE,
-    UNIQUE (container_device_id, key)
-);
-CREATE TABLE containers_profiles (
-    id INTEGER primary key AUTOINCREMENT NOT NULL,
-    container_id INTEGER NOT NULL,
-    profile_id INTEGER NOT NULL,
-    apply_order INTEGER NOT NULL default 0,
-    UNIQUE (container_id, profile_id),
-    FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE,
-    FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE
-);
-CREATE TABLE images (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    fingerprint VARCHAR(255) NOT NULL,
-    filename VARCHAR(255) NOT NULL,
-    size INTEGER NOT NULL,
-    public INTEGER NOT NULL DEFAULT 0,
-    architecture INTEGER NOT NULL,
-    creation_date DATETIME,
-    expiry_date DATETIME,
-    upload_date DATETIME NOT NULL,
-    cached INTEGER NOT NULL DEFAULT 0,
-    last_use_date DATETIME,
-    auto_update INTEGER NOT NULL DEFAULT 0,
-    UNIQUE (fingerprint)
-);
-CREATE TABLE "images_aliases" (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    image_id INTEGER NOT NULL,
-    description TEXT,
-    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE,
-    UNIQUE (name)
-);
-CREATE TABLE images_properties (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    image_id INTEGER NOT NULL,
-    type INTEGER NOT NULL,
-    key VARCHAR(255) NOT NULL,
-    value TEXT,
-    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
-);
-CREATE TABLE images_source (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    image_id INTEGER NOT NULL,
-    server TEXT NOT NULL,
-    protocol INTEGER NOT NULL,
-    certificate TEXT NOT NULL,
-    alias VARCHAR(255) NOT NULL,
-    FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE
-);
 CREATE TABLE patches (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name VARCHAR(255) NOT NULL,
     applied_at DATETIME NOT NULL,
     UNIQUE (name)
 );
-CREATE TABLE profiles (
-    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-    name VARCHAR(255) NOT NULL,
-    description TEXT,
-    UNIQUE (name)
-);
 CREATE TABLE profiles_config (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     profile_id INTEGER NOT NULL,
diff --git a/lxd/db/node/update.go b/lxd/db/node/update.go
index 1153ecf8f..d4ce9efea 100644
--- a/lxd/db/node/update.go
+++ b/lxd/db/node/update.go
@@ -119,8 +119,18 @@ CREATE TABLE raft_nodes (
 );
 DELETE FROM config WHERE NOT key='core.https_address';
 DROP TABLE certificates;
+DROP TABLE containers_devices_config;
+DROP TABLE containers_devices;
+DROP TABLE containers_config;
+DROP TABLE containers_profiles;
+DROP TABLE containers;
+DROP TABLE images_aliases;
+DROP TABLE images_properties;
+DROP TABLE images_source;
+DROP TABLE images;
 DROP TABLE networks_config;
 DROP TABLE networks;
+DROP TABLE profiles;
 DROP TABLE storage_volumes_config;
 DROP TABLE storage_volumes;
 DROP TABLE storage_pools_config;
diff --git a/lxd/db/profiles.go b/lxd/db/profiles.go
index 61bbd386f..bddfb317c 100644
--- a/lxd/db/profiles.go
+++ b/lxd/db/profiles.go
@@ -11,12 +11,12 @@ import (
 )
 
 // Profiles returns a string list of profiles.
-func (n *Node) Profiles() ([]string, error) {
+func (c *Cluster) Profiles() ([]string, error) {
 	q := fmt.Sprintf("SELECT name FROM profiles")
 	inargs := []interface{}{}
 	var name string
 	outfmt := []interface{}{name}
-	result, err := queryScan(n.db, q, inargs, outfmt)
+	result, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return []string{}, err
 	}
@@ -29,24 +29,24 @@ func (n *Node) Profiles() ([]string, error) {
 	return response, nil
 }
 
-func (n *Node) ProfileGet(name string) (int64, *api.Profile, error) {
+func (c *Cluster) ProfileGet(name string) (int64, *api.Profile, error) {
 	id := int64(-1)
 	description := sql.NullString{}
 
 	q := "SELECT id, description FROM profiles WHERE name=?"
 	arg1 := []interface{}{name}
 	arg2 := []interface{}{&id, &description}
-	err := dbQueryRowScan(n.db, q, arg1, arg2)
+	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	config, err := n.ProfileConfig(name)
+	config, err := c.ProfileConfig(name)
 	if err != nil {
 		return -1, nil, err
 	}
 
-	devices, err := n.Devices(name, true)
+	devices, err := c.Devices(name, true)
 	if err != nil {
 		return -1, nil, err
 	}
@@ -62,10 +62,10 @@ func (n *Node) ProfileGet(name string) (int64, *api.Profile, error) {
 	return id, &profile, nil
 }
 
-func (n *Node) ProfileCreate(profile string, description string, config map[string]string,
+func (c *Cluster) ProfileCreate(profile string, description string, config map[string]string,
 	devices types.Devices) (int64, error) {
 
-	tx, err := begin(n.db)
+	tx, err := begin(c.db)
 	if err != nil {
 		return -1, err
 	}
@@ -100,15 +100,15 @@ func (n *Node) ProfileCreate(profile string, description string, config map[stri
 	return id, nil
 }
 
-func (n *Node) ProfileCreateDefault() error {
-	id, _, _ := n.ProfileGet("default")
+func (c *Cluster) ProfileCreateDefault() error {
+	id, _, _ := c.ProfileGet("default")
 
 	if id != -1 {
 		// default profile already exists
 		return nil
 	}
 
-	_, err := n.ProfileCreate("default", "Default LXD profile", map[string]string{}, types.Devices{})
+	_, err := c.ProfileCreate("default", "Default LXD profile", map[string]string{}, types.Devices{})
 	if err != nil {
 		return err
 	}
@@ -117,7 +117,7 @@ func (n *Node) ProfileCreateDefault() error {
 }
 
 // Get the profile configuration map from the DB
-func (n *Node) ProfileConfig(name string) (map[string]string, error) {
+func (c *Cluster) ProfileConfig(name string) (map[string]string, error) {
 	var key, value string
 	query := `
         SELECT
@@ -127,7 +127,7 @@ func (n *Node) ProfileConfig(name string) (map[string]string, error) {
 		WHERE name=?`
 	inargs := []interface{}{name}
 	outfmt := []interface{}{key, value}
-	results, err := queryScan(n.db, query, inargs, outfmt)
+	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to get profile '%s'", name)
 	}
@@ -139,7 +139,7 @@ func (n *Node) ProfileConfig(name string) (map[string]string, error) {
 		 */
 		query := "SELECT id FROM profiles WHERE name=?"
 		var id int
-		results, err := queryScan(n.db, query, []interface{}{name}, []interface{}{id})
+		results, err := queryScan(c.db, query, []interface{}{name}, []interface{}{id})
 		if err != nil {
 			return nil, err
 		}
@@ -161,13 +161,13 @@ func (n *Node) ProfileConfig(name string) (map[string]string, error) {
 	return config, nil
 }
 
-func (n *Node) ProfileDelete(name string) error {
-	id, _, err := n.ProfileGet(name)
+func (c *Cluster) ProfileDelete(name string) error {
+	id, _, err := c.ProfileGet(name)
 	if err != nil {
 		return err
 	}
 
-	_, err = exec(n.db, "DELETE FROM profiles WHERE id=?", id)
+	_, err = exec(c.db, "DELETE FROM profiles WHERE id=?", id)
 	if err != nil {
 		return err
 	}
@@ -175,8 +175,8 @@ func (n *Node) ProfileDelete(name string) error {
 	return nil
 }
 
-func (n *Node) ProfileUpdate(name string, newName string) error {
-	tx, err := begin(n.db)
+func (c *Cluster) ProfileUpdate(name string, newName string) error {
+	tx, err := begin(c.db)
 	if err != nil {
 		return err
 	}
@@ -236,7 +236,7 @@ func ProfileConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
 	return nil
 }
 
-func (n *Node) ProfileContainersGet(profile string) ([]string, error) {
+func (c *Cluster) ProfileContainersGet(profile string) ([]string, error) {
 	q := `SELECT containers.name FROM containers JOIN containers_profiles
 		ON containers.id == containers_profiles.container_id
 		JOIN profiles ON containers_profiles.profile_id == profiles.id
@@ -247,7 +247,7 @@ func (n *Node) ProfileContainersGet(profile string) ([]string, error) {
 	var name string
 	outfmt := []interface{}{name}
 
-	output, err := queryScan(n.db, q, inargs, outfmt)
+	output, err := queryScan(c.db, q, inargs, outfmt)
 	if err != nil {
 		return results, err
 	}
@@ -259,13 +259,13 @@ func (n *Node) ProfileContainersGet(profile string) ([]string, error) {
 	return results, nil
 }
 
-func (n *Node) ProfileCleanupLeftover() error {
+func (c *Cluster) ProfileCleanupLeftover() error {
 	stmt := `
 DELETE FROM profiles_config WHERE profile_id NOT IN (SELECT id FROM profiles);
 DELETE FROM profiles_devices WHERE profile_id NOT IN (SELECT id FROM profiles);
 DELETE FROM profiles_devices_config WHERE profile_device_id NOT IN (SELECT id FROM profiles_devices);
 `
-	_, err := n.db.Exec(stmt)
+	_, err := c.db.Exec(stmt)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/devices.go b/lxd/devices.go
index 99d0a7e7a..978bb4656 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -604,7 +604,7 @@ func deviceTaskBalance(s *state.State) {
 	}
 
 	// Iterate through the containers
-	containers, err := s.Node.ContainersList(db.CTypeRegular)
+	containers, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Error("problem loading containers list", log.Ctx{"err": err})
 		return
@@ -730,7 +730,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
 		return
 	}
 
-	containers, err := s.Node.ContainersList(db.CTypeRegular)
+	containers, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return
 	}
@@ -761,7 +761,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
 }
 
 func deviceUSBEvent(s *state.State, usb usbDevice) {
-	containers, err := s.Node.ContainersList(db.CTypeRegular)
+	containers, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Error("problem loading containers list", log.Ctx{"err": err})
 		return
diff --git a/lxd/devlxd.go b/lxd/devlxd.go
index b1cb16ea9..7dd0b1d61 100644
--- a/lxd/devlxd.go
+++ b/lxd/devlxd.go
@@ -348,7 +348,7 @@ func findContainerForPid(pid int32, d *Daemon) (container, error) {
 		return nil, err
 	}
 
-	containers, err := d.db.ContainersList(db.CTypeRegular)
+	containers, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/images.go b/lxd/images.go
index cd7916969..5697637a4 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -300,7 +300,7 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, builddir st
 
 	info.Fingerprint = fmt.Sprintf("%x", sha256.Sum(nil))
 
-	_, _, err = d.db.ImageGet(info.Fingerprint, false, true)
+	_, _, err = d.cluster.ImageGet(info.Fingerprint, false, true)
 	if err == nil {
 		return nil, fmt.Errorf("The image already exists: %s", info.Fingerprint)
 	}
@@ -316,7 +316,7 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, builddir st
 	info.Properties = req.Properties
 
 	// Create the database entry
-	err = d.db.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+	err = d.cluster.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 	if err != nil {
 		return nil, err
 	}
@@ -341,7 +341,7 @@ func imgPostRemoteInfo(d *Daemon, req api.ImagesPost, op *operation) (*api.Image
 		return nil, err
 	}
 
-	id, info, err := d.db.ImageGet(info.Fingerprint, false, true)
+	id, info, err := d.cluster.ImageGet(info.Fingerprint, false, true)
 	if err != nil {
 		return nil, err
 	}
@@ -353,7 +353,7 @@ func imgPostRemoteInfo(d *Daemon, req api.ImagesPost, op *operation) (*api.Image
 
 	// Update the DB record if needed
 	if req.Public || req.AutoUpdate || req.Filename != "" || len(req.Properties) > 0 {
-		err = d.db.ImageUpdate(id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+		err = d.cluster.ImageUpdate(id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 		if err != nil {
 			return nil, err
 		}
@@ -410,7 +410,7 @@ func imgPostURLInfo(d *Daemon, req api.ImagesPost, op *operation) (*api.Image, e
 		return nil, err
 	}
 
-	id, info, err := d.db.ImageGet(info.Fingerprint, false, false)
+	id, info, err := d.cluster.ImageGet(info.Fingerprint, false, false)
 	if err != nil {
 		return nil, err
 	}
@@ -421,7 +421,7 @@ func imgPostURLInfo(d *Daemon, req api.ImagesPost, op *operation) (*api.Image, e
 	}
 
 	if req.Public || req.AutoUpdate || req.Filename != "" || len(req.Properties) > 0 {
-		err = d.db.ImageUpdate(id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+		err = d.cluster.ImageUpdate(id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 		if err != nil {
 			return nil, err
 		}
@@ -618,7 +618,7 @@ func getImgPostInfo(d *Daemon, r *http.Request, builddir string, post *os.File)
 	}
 
 	// Check if the image already exists
-	exists, err := d.db.ImageExists(info.Fingerprint)
+	exists, err := d.cluster.ImageExists(info.Fingerprint)
 	if err != nil {
 		return nil, err
 	}
@@ -626,7 +626,7 @@ func getImgPostInfo(d *Daemon, r *http.Request, builddir string, post *os.File)
 		return nil, fmt.Errorf("Image with same fingerprint already exists")
 	}
 	// Create the database entry
-	err = d.db.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+	err = d.cluster.ImageInsert(info.Fingerprint, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 	if err != nil {
 		return nil, err
 	}
@@ -741,17 +741,17 @@ func imagesPost(d *Daemon, r *http.Request) Response {
 
 		// Apply any provided alias
 		for _, alias := range req.Aliases {
-			_, _, err := d.db.ImageAliasGet(alias.Name, true)
+			_, _, err := d.cluster.ImageAliasGet(alias.Name, true)
 			if err == nil {
 				return fmt.Errorf("Alias already exists: %s", alias.Name)
 			}
 
-			id, _, err := d.db.ImageGet(info.Fingerprint, false, false)
+			id, _, err := d.cluster.ImageGet(info.Fingerprint, false, false)
 			if err != nil {
 				return err
 			}
 
-			err = d.db.ImageAliasAdd(alias.Name, id, alias.Description)
+			err = d.cluster.ImageAliasAdd(alias.Name, id, alias.Description)
 			if err != nil {
 				return err
 			}
@@ -817,7 +817,7 @@ func getImageMetadata(fname string) (*api.ImageMetadata, error) {
 }
 
 func doImagesGet(d *Daemon, recursion bool, public bool) (interface{}, error) {
-	results, err := d.db.ImagesGet(public)
+	results, err := d.cluster.ImagesGet(public)
 	if err != nil {
 		return []string{}, err
 	}
@@ -830,7 +830,7 @@ func doImagesGet(d *Daemon, recursion bool, public bool) (interface{}, error) {
 			url := fmt.Sprintf("/%s/images/%s", version.APIVersion, name)
 			resultString[i] = url
 		} else {
-			image, response := doImageGet(d.db, name, public)
+			image, response := doImageGet(d.cluster, name, public)
 			if response != nil {
 				continue
 			}
@@ -884,14 +884,14 @@ func autoUpdateImagesTask(d *Daemon) (task.Func, task.Schedule) {
 func autoUpdateImages(ctx context.Context, d *Daemon) {
 	logger.Infof("Updating images")
 
-	images, err := d.db.ImagesGet(false)
+	images, err := d.cluster.ImagesGet(false)
 	if err != nil {
 		logger.Error("Unable to retrieve the list of images", log.Ctx{"err": err})
 		return
 	}
 
 	for _, fingerprint := range images {
-		id, info, err := d.db.ImageGet(fingerprint, false, true)
+		id, info, err := d.cluster.ImageGet(fingerprint, false, true)
 		if err != nil {
 			logger.Error("Error loading image", log.Ctx{"err": err, "fp": fingerprint})
 			continue
@@ -923,7 +923,7 @@ func autoUpdateImages(ctx context.Context, d *Daemon) {
 // Returns whether the image has been updated.
 func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 	fingerprint := info.Fingerprint
-	_, source, err := d.db.ImageSourceGet(id)
+	_, source, err := d.cluster.ImageSourceGet(id)
 	if err != nil {
 		logger.Error("Error getting source image", log.Ctx{"err": err, "fp": fingerprint})
 		return err
@@ -938,7 +938,7 @@ func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 	}
 
 	// Translate the IDs to poolNames.
-	poolNames, err := d.db.ImageGetPoolNamesFromIDs(poolIDs)
+	poolNames, err := d.cluster.ImageGetPoolNamesFromIDs(poolIDs)
 	if err != nil {
 		logger.Error("Error getting image pools", log.Ctx{"err": err, "fp": fingerprint})
 		return err
@@ -977,27 +977,27 @@ func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 			continue
 		}
 
-		newId, _, err := d.db.ImageGet(hash, false, true)
+		newId, _, err := d.cluster.ImageGet(hash, false, true)
 		if err != nil {
 			logger.Error("Error loading image", log.Ctx{"err": err, "fp": hash})
 			continue
 		}
 
 		if info.Cached {
-			err = d.db.ImageLastAccessInit(hash)
+			err = d.cluster.ImageLastAccessInit(hash)
 			if err != nil {
 				logger.Error("Error setting cached flag", log.Ctx{"err": err, "fp": hash})
 				continue
 			}
 		}
 
-		err = d.db.ImageLastAccessUpdate(hash, info.LastUsedAt)
+		err = d.cluster.ImageLastAccessUpdate(hash, info.LastUsedAt)
 		if err != nil {
 			logger.Error("Error setting last use date", log.Ctx{"err": err, "fp": hash})
 			continue
 		}
 
-		err = d.db.ImageAliasesMove(id, newId)
+		err = d.cluster.ImageAliasesMove(id, newId)
 		if err != nil {
 			logger.Error("Error moving aliases", log.Ctx{"err": err, "fp": hash})
 			continue
@@ -1038,7 +1038,7 @@ func autoUpdateImage(d *Daemon, op *operation, id int, info *api.Image) error {
 	}
 
 	// Remove the database entry for the image.
-	if err = d.db.ImageDelete(id); err != nil {
+	if err = d.cluster.ImageDelete(id); err != nil {
 		logger.Debugf("Error deleting image from database %s: %s", fname, err)
 	}
 
@@ -1094,7 +1094,7 @@ func pruneExpiredImages(ctx context.Context, d *Daemon) {
 	}
 
 	// Get the list of expired images.
-	images, err := d.db.ImagesGetExpired(expiry)
+	images, err := d.cluster.ImagesGetExpired(expiry)
 	if err != nil {
 		logger.Error("Unable to retrieve the list of expired images", log.Ctx{"err": err})
 		return
@@ -1119,7 +1119,7 @@ func pruneExpiredImages(ctx context.Context, d *Daemon) {
 		}
 
 		// Translate the IDs to poolNames.
-		poolNames, err := d.db.ImageGetPoolNamesFromIDs(poolIDs)
+		poolNames, err := d.cluster.ImageGetPoolNamesFromIDs(poolIDs)
 		if err != nil {
 			continue
 		}
@@ -1150,13 +1150,13 @@ func pruneExpiredImages(ctx context.Context, d *Daemon) {
 			}
 		}
 
-		imgID, _, err := d.db.ImageGet(fp, false, false)
+		imgID, _, err := d.cluster.ImageGet(fp, false, false)
 		if err != nil {
 			logger.Debugf("Error retrieving image info %s: %s", fp, err)
 		}
 
 		// Remove the database entry for the image.
-		if err = d.db.ImageDelete(imgID); err != nil {
+		if err = d.cluster.ImageDelete(imgID); err != nil {
 			logger.Debugf("Error deleting image %s from database: %s", fp, err)
 		}
 	}
@@ -1186,7 +1186,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 	deleteFromAllPools := func() error {
 		// Use the fingerprint we received in a LIKE query and use the full
 		// fingerprint we receive from the database in all further queries.
-		imgID, imgInfo, err := d.db.ImageGet(fingerprint, false, false)
+		imgID, imgInfo, err := d.cluster.ImageGet(fingerprint, false, false)
 		if err != nil {
 			return err
 		}
@@ -1196,7 +1196,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 			return err
 		}
 
-		pools, err := d.db.ImageGetPoolNamesFromIDs(poolIDs)
+		pools, err := d.cluster.ImageGetPoolNamesFromIDs(poolIDs)
 		if err != nil {
 			return err
 		}
@@ -1227,7 +1227,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 		}
 
 		// Remove the database entry for the image.
-		return d.db.ImageDelete(imgID)
+		return d.cluster.ImageDelete(imgID)
 	}
 
 	rmimg := func(op *operation) error {
@@ -1245,7 +1245,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 	return OperationResponse(op)
 }
 
-func doImageGet(db *db.Node, fingerprint string, public bool) (*api.Image, Response) {
+func doImageGet(db *db.Cluster, fingerprint string, public bool) (*api.Image, Response) {
 	_, imgInfo, err := db.ImageGet(fingerprint, public, false)
 	if err != nil {
 		return nil, SmartError(err)
@@ -1289,7 +1289,7 @@ func imageGet(d *Daemon, r *http.Request) Response {
 	public := d.checkTrustedClient(r) != nil
 	secret := r.FormValue("secret")
 
-	info, response := doImageGet(d.db, fingerprint, false)
+	info, response := doImageGet(d.cluster, fingerprint, false)
 	if response != nil {
 		return response
 	}
@@ -1305,7 +1305,7 @@ func imageGet(d *Daemon, r *http.Request) Response {
 func imagePut(d *Daemon, r *http.Request) Response {
 	// Get current value
 	fingerprint := mux.Vars(r)["fingerprint"]
-	id, info, err := d.db.ImageGet(fingerprint, false, false)
+	id, info, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1322,7 +1322,7 @@ func imagePut(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
-	err = d.db.ImageUpdate(id, info.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, req.Properties)
+	err = d.cluster.ImageUpdate(id, info.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, req.Properties)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1333,7 +1333,7 @@ func imagePut(d *Daemon, r *http.Request) Response {
 func imagePatch(d *Daemon, r *http.Request) Response {
 	// Get current value
 	fingerprint := mux.Vars(r)["fingerprint"]
-	id, info, err := d.db.ImageGet(fingerprint, false, false)
+	id, info, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1388,7 +1388,7 @@ func imagePatch(d *Daemon, r *http.Request) Response {
 		info.Properties = properties
 	}
 
-	err = d.db.ImageUpdate(id, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
+	err = d.cluster.ImageUpdate(id, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreatedAt, info.ExpiresAt, info.Properties)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1409,17 +1409,17 @@ func aliasesPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// This is just to see if the alias name already exists.
-	_, _, err := d.db.ImageAliasGet(req.Name, true)
+	_, _, err := d.cluster.ImageAliasGet(req.Name, true)
 	if err == nil {
 		return Conflict
 	}
 
-	id, _, err := d.db.ImageGet(req.Target, false, false)
+	id, _, err := d.cluster.ImageGet(req.Target, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasAdd(req.Name, id, req.Description)
+	err = d.cluster.ImageAliasAdd(req.Name, id, req.Description)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1430,7 +1430,7 @@ func aliasesPost(d *Daemon, r *http.Request) Response {
 func aliasesGet(d *Daemon, r *http.Request) Response {
 	recursion := util.IsRecursionRequest(r)
 
-	names, err := d.db.ImageAliasesGet()
+	names, err := d.cluster.ImageAliasesGet()
 	if err != nil {
 		return BadRequest(err)
 	}
@@ -1442,7 +1442,7 @@ func aliasesGet(d *Daemon, r *http.Request) Response {
 			responseStr = append(responseStr, url)
 
 		} else {
-			_, alias, err := d.db.ImageAliasGet(name, d.checkTrustedClient(r) == nil)
+			_, alias, err := d.cluster.ImageAliasGet(name, d.checkTrustedClient(r) == nil)
 			if err != nil {
 				continue
 			}
@@ -1460,7 +1460,7 @@ func aliasesGet(d *Daemon, r *http.Request) Response {
 func aliasGet(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
 
-	_, alias, err := d.db.ImageAliasGet(name, d.checkTrustedClient(r) == nil)
+	_, alias, err := d.cluster.ImageAliasGet(name, d.checkTrustedClient(r) == nil)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1470,12 +1470,12 @@ func aliasGet(d *Daemon, r *http.Request) Response {
 
 func aliasDelete(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
-	_, _, err := d.db.ImageAliasGet(name, true)
+	_, _, err := d.cluster.ImageAliasGet(name, true)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasDelete(name)
+	err = d.cluster.ImageAliasDelete(name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1486,7 +1486,7 @@ func aliasDelete(d *Daemon, r *http.Request) Response {
 func aliasPut(d *Daemon, r *http.Request) Response {
 	// Get current value
 	name := mux.Vars(r)["name"]
-	id, alias, err := d.db.ImageAliasGet(name, true)
+	id, alias, err := d.cluster.ImageAliasGet(name, true)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1506,12 +1506,12 @@ func aliasPut(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("The target field is required"))
 	}
 
-	imageId, _, err := d.db.ImageGet(req.Target, false, false)
+	imageId, _, err := d.cluster.ImageGet(req.Target, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasUpdate(id, imageId, req.Description)
+	err = d.cluster.ImageAliasUpdate(id, imageId, req.Description)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1522,7 +1522,7 @@ func aliasPut(d *Daemon, r *http.Request) Response {
 func aliasPatch(d *Daemon, r *http.Request) Response {
 	// Get current value
 	name := mux.Vars(r)["name"]
-	id, alias, err := d.db.ImageAliasGet(name, true)
+	id, alias, err := d.cluster.ImageAliasGet(name, true)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1558,12 +1558,12 @@ func aliasPatch(d *Daemon, r *http.Request) Response {
 		alias.Description = description
 	}
 
-	imageId, _, err := d.db.ImageGet(alias.Target, false, false)
+	imageId, _, err := d.cluster.ImageGet(alias.Target, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasUpdate(id, imageId, alias.Description)
+	err = d.cluster.ImageAliasUpdate(id, imageId, alias.Description)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1580,17 +1580,17 @@ func aliasPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check that the name isn't already in use
-	id, _, _ := d.db.ImageAliasGet(req.Name, true)
+	id, _, _ := d.cluster.ImageAliasGet(req.Name, true)
 	if id > 0 {
 		return Conflict
 	}
 
-	id, _, err := d.db.ImageAliasGet(name, true)
+	id, _, err := d.cluster.ImageAliasGet(name, true)
 	if err != nil {
 		return SmartError(err)
 	}
 
-	err = d.db.ImageAliasRename(id, req.Name)
+	err = d.cluster.ImageAliasRename(id, req.Name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1604,7 +1604,7 @@ func imageExport(d *Daemon, r *http.Request) Response {
 	public := d.checkTrustedClient(r) != nil
 	secret := r.FormValue("secret")
 
-	_, imgInfo, err := d.db.ImageGet(fingerprint, false, false)
+	_, imgInfo, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1654,7 +1654,7 @@ func imageExport(d *Daemon, r *http.Request) Response {
 
 func imageSecret(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
-	_, imgInfo, err := d.db.ImageGet(fingerprint, false, false)
+	_, imgInfo, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -1681,7 +1681,7 @@ func imageSecret(d *Daemon, r *http.Request) Response {
 
 func imageRefresh(d *Daemon, r *http.Request) Response {
 	fingerprint := mux.Vars(r)["fingerprint"]
-	imageId, imageInfo, err := d.db.ImageGet(fingerprint, false, false)
+	imageId, imageInfo, err := d.cluster.ImageGet(fingerprint, false, false)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/logging.go b/lxd/logging.go
index 8a0856f13..3408cd683 100644
--- a/lxd/logging.go
+++ b/lxd/logging.go
@@ -41,7 +41,7 @@ func expireLogs(ctx context.Context, state *state.State) error {
 	var containers []string
 	ch := make(chan struct{})
 	go func() {
-		containers, err = state.Node.ContainersList(db.CTypeRegular)
+		containers, err = state.Cluster.ContainersList(db.CTypeRegular)
 		ch <- struct{}{}
 	}()
 	select {
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index 5b43da9dc..7772c84f6 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -6,14 +6,20 @@ import (
 	"os"
 	"path/filepath"
 
+	"github.com/CanonicalLtd/go-sqlite3x"
 	"github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/idmap"
 	"github.com/lxc/lxd/shared/logger"
+	"github.com/mattn/go-sqlite3"
 )
 
+func init() {
+	sql.Register("dqlite_direct_access", &sqlite3.SQLiteDriver{ConnectHook: sqliteDirectAccess})
+}
+
 func cmdActivateIfNeeded(args *Args) error {
 	// Only root should run this
 	if os.Geteuid() != 0 {
@@ -56,14 +62,23 @@ func cmdActivateIfNeeded(args *Args) error {
 	}
 
 	// Look for auto-started or previously started containers
-	result, err := d.db.ContainersList(db.CTypeRegular)
+	path := filepath.Join(d.os.VarDir, "raft", "db.bin")
+	if !shared.PathExists(path) {
+		logger.Debugf("No DB, so no need to start the daemon now.")
+		return nil
+	}
+	sqldb, err = sql.Open("dqlite_direct_access", path+"?mode=ro")
 	if err != nil {
 		return err
 	}
 
+	d.cluster = db.ForLocalInspection(sqldb)
+	result, err := d.cluster.ContainersList(db.CTypeRegular)
+
 	for _, name := range result {
 		c, err := containerLoadByName(d.State(), name)
 		if err != nil {
+			sqldb.Close()
 			return err
 		}
 
@@ -72,18 +87,45 @@ func cmdActivateIfNeeded(args *Args) error {
 		autoStart := config["boot.autostart"]
 
 		if c.IsRunning() {
+			sqldb.Close()
 			logger.Debugf("Daemon has running containers, activating...")
 			_, err := lxd.ConnectLXDUnix("", nil)
 			return err
 		}
 
 		if lastState == "RUNNING" || lastState == "Running" || shared.IsTrue(autoStart) {
+			sqldb.Close()
 			logger.Debugf("Daemon has auto-started containers, activating...")
 			_, err := lxd.ConnectLXDUnix("", nil)
 			return err
 		}
 	}
 
+	sqldb.Close()
 	logger.Debugf("No need to start the daemon now.")
 	return nil
 }
+
+// Configure the sqlite connection so that it's safe to access the
+// dqlite-managed sqlite file, also without setting up raft.
+func sqliteDirectAccess(conn *sqlite3.SQLiteConn) error {
+	// Ensure journal mode is set to WAL, as this is a requirement for
+	// replication.
+	err := sqlite3x.JournalModePragma(conn, sqlite3x.JournalWal)
+	if err != nil {
+		return err
+	}
+
+	// Ensure we don't truncate or checkpoint the WAL on exit, as this
+	// would bork replication which must be in full control of the WAL
+	// file.
+	err = sqlite3x.JournalSizeLimitPragma(conn, -1)
+	if err != nil {
+		return err
+	}
+	err = sqlite3x.DatabaseNoCheckpointOnClose(conn)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/lxd/main_sql.go b/lxd/main_sql.go
index e721633bb..f22f67375 100644
--- a/lxd/main_sql.go
+++ b/lxd/main_sql.go
@@ -35,7 +35,7 @@ func cmdSQL(args *Args) error {
 	if err != nil {
 		return err
 	}
-	if strings.HasPrefix(query, "SELECT") {
+	if strings.HasPrefix(strings.ToUpper(query), "SELECT") {
 		// Print results in tabular format
 		widths := make([]int, len(result.Columns))
 		for i, column := range result.Columns {
diff --git a/lxd/main_test.go b/lxd/main_test.go
index 4bed14446..0cf2ca693 100644
--- a/lxd/main_test.go
+++ b/lxd/main_test.go
@@ -82,12 +82,12 @@ func (suite *lxdTestSuite) SetupTest() {
 	devicesMap := map[string]map[string]string{}
 	devicesMap["root"] = rootDev
 
-	defaultID, _, err := suite.d.db.ProfileGet("default")
+	defaultID, _, err := suite.d.cluster.ProfileGet("default")
 	if err != nil {
 		suite.T().Fatalf("failed to get default profile: %v", err)
 	}
 
-	tx, err := suite.d.db.Begin()
+	tx, err := suite.d.cluster.Begin()
 	if err != nil {
 		suite.T().Fatalf("failed to begin transaction: %v", err)
 	}
diff --git a/lxd/networks.go b/lxd/networks.go
index e12bc5619..0b994187c 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -183,7 +183,7 @@ func doNetworkGet(d *Daemon, name string) (api.Network, error) {
 	n.Config = map[string]string{}
 
 	// Look for containers using the interface
-	cts, err := d.db.ContainersList(db.CTypeRegular)
+	cts, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return api.Network{}, err
 	}
@@ -475,7 +475,7 @@ func (n *network) IsRunning() bool {
 
 func (n *network) IsUsed() bool {
 	// Look for containers using the interface
-	cts, err := n.db.ContainersList(db.CTypeRegular)
+	cts, err := n.state.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return true
 	}
diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index 1de47c57a..ccef0ed59 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -744,7 +744,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
 	defer networkStaticLock.Unlock()
 
 	// Get all the containers
-	containers, err := s.Node.ContainersList(db.CTypeRegular)
+	containers, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/patches.go b/lxd/patches.go
index f8dbb2dd9..2384d15d5 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -107,11 +107,11 @@ func patchesApplyAll(d *Daemon) error {
 
 // Patches begin here
 func patchLeftoverProfileConfig(name string, d *Daemon) error {
-	return d.db.ProfileCleanupLeftover()
+	return d.cluster.ProfileCleanupLeftover()
 }
 
 func patchInvalidProfileNames(name string, d *Daemon) error {
-	profiles, err := d.db.Profiles()
+	profiles, err := d.cluster.Profiles()
 	if err != nil {
 		return err
 	}
@@ -119,7 +119,7 @@ func patchInvalidProfileNames(name string, d *Daemon) error {
 	for _, profile := range profiles {
 		if strings.Contains(profile, "/") || shared.StringInSlice(profile, []string{".", ".."}) {
 			logger.Info("Removing unreachable profile (invalid name)", log.Ctx{"name": profile})
-			err := d.db.ProfileDelete(profile)
+			err := d.cluster.ProfileDelete(profile)
 			if err != nil {
 				return err
 			}
@@ -208,25 +208,25 @@ func patchStorageApi(name string, d *Daemon) error {
 	// Check if this LXD instace currently has any containers, snapshots, or
 	// images configured. If so, we create a default storage pool in the
 	// database. Otherwise, the user will have to run LXD init.
-	cRegular, err := d.db.ContainersList(db.CTypeRegular)
+	cRegular, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
 
 	// Get list of existing snapshots.
-	cSnapshots, err := d.db.ContainersList(db.CTypeSnapshot)
+	cSnapshots, err := d.cluster.ContainersList(db.CTypeSnapshot)
 	if err != nil {
 		return err
 	}
 
 	// Get list of existing public images.
-	imgPublic, err := d.db.ImagesGet(true)
+	imgPublic, err := d.cluster.ImagesGet(true)
 	if err != nil {
 		return err
 	}
 
 	// Get list of existing private images.
-	imgPrivate, err := d.db.ImagesGet(false)
+	imgPrivate, err := d.cluster.ImagesGet(false)
 	if err != nil {
 		return err
 	}
@@ -450,7 +450,7 @@ func upgradeFromStorageTypeBtrfs(name string, d *Daemon, defaultPoolName string,
 		}
 
 		// Check if we need to account for snapshots for this container.
-		ctSnapshots, err := d.db.ContainerGetSnapshots(ct)
+		ctSnapshots, err := d.cluster.ContainerGetSnapshots(ct)
 		if err != nil {
 			return err
 		}
@@ -1126,7 +1126,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
 		}
 
 		// Check if we need to account for snapshots for this container.
-		ctSnapshots, err := d.db.ContainerGetSnapshots(ct)
+		ctSnapshots, err := d.cluster.ContainerGetSnapshots(ct)
 		if err != nil {
 			return err
 		}
@@ -1572,7 +1572,7 @@ func upgradeFromStorageTypeZfs(name string, d *Daemon, defaultPoolName string, d
 		}
 
 		// Check if we need to account for snapshots for this container.
-		ctSnapshots, err := d.db.ContainerGetSnapshots(ct)
+		ctSnapshots, err := d.cluster.ContainerGetSnapshots(ct)
 		if err != nil {
 			logger.Errorf("Failed to query database")
 			return err
@@ -1715,10 +1715,10 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
 	// appropriate device including a pool is added to the default profile
 	// or the user explicitly passes the pool the container's storage volume
 	// is supposed to be created on.
-	profiles, err := d.db.Profiles()
+	profiles, err := d.cluster.Profiles()
 	if err == nil {
 		for _, pName := range profiles {
-			pID, p, err := d.db.ProfileGet(pName)
+			pID, p, err := d.cluster.ProfileGet(pName)
 			if err != nil {
 				logger.Errorf("Could not query database: %s.", err)
 				return err
@@ -1877,13 +1877,13 @@ func patchStorageApiV1(name string, d *Daemon) error {
 		return nil
 	}
 
-	cRegular, err := d.db.ContainersList(db.CTypeRegular)
+	cRegular, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
 
 	// Get list of existing snapshots.
-	cSnapshots, err := d.db.ContainersList(db.CTypeSnapshot)
+	cSnapshots, err := d.cluster.ContainersList(db.CTypeSnapshot)
 	if err != nil {
 		return err
 	}
@@ -1898,7 +1898,7 @@ func patchStorageApiV1(name string, d *Daemon) error {
 }
 
 func patchStorageApiDirCleanup(name string, d *Daemon) error {
-	fingerprints, err := d.db.ImagesGet(false)
+	fingerprints, err := d.cluster.ImagesGet(false)
 	if err != nil {
 		return err
 	}
@@ -2481,18 +2481,18 @@ func patchStorageApiDirBindMount(name string, d *Daemon) error {
 }
 
 func patchFixUploadedAt(name string, d *Daemon) error {
-	images, err := d.db.ImagesGet(false)
+	images, err := d.cluster.ImagesGet(false)
 	if err != nil {
 		return err
 	}
 
 	for _, fingerprint := range images {
-		id, image, err := d.db.ImageGet(fingerprint, false, true)
+		id, image, err := d.cluster.ImageGet(fingerprint, false, true)
 		if err != nil {
 			return err
 		}
 
-		err = d.db.ImageUploadedAt(id, image.UploadedAt)
+		err = d.cluster.ImageUploadedAt(id, image.UploadedAt)
 		if err != nil {
 			return err
 		}
@@ -2542,7 +2542,7 @@ func patchStorageApiCephSizeRemove(name string, d *Daemon) error {
 }
 
 func patchDevicesNewNamingScheme(name string, d *Daemon) error {
-	cts, err := d.db.ContainersList(db.CTypeRegular)
+	cts, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		logger.Errorf("Failed to retrieve containers from database")
 		return err
@@ -2728,7 +2728,7 @@ func patchUpdateFromV10(d *Daemon) error {
 }
 
 func patchUpdateFromV11(d *Daemon) error {
-	cNames, err := d.db.ContainersList(db.CTypeSnapshot)
+	cNames, err := d.cluster.ContainersList(db.CTypeSnapshot)
 	if err != nil {
 		return err
 	}
@@ -2799,7 +2799,7 @@ func patchUpdateFromV15(d *Daemon) error {
 	// munge all LVM-backed containers' LV names to match what is
 	// required for snapshot support
 
-	cNames, err := d.db.ContainersList(db.CTypeRegular)
+	cNames, err := d.cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/profiles.go b/lxd/profiles.go
index 6bafb47f0..561633881 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -23,7 +23,7 @@ import (
 
 /* This is used for both profiles post and profile put */
 func profilesGet(d *Daemon, r *http.Request) Response {
-	results, err := d.db.Profiles()
+	results, err := d.cluster.Profiles()
 	if err != nil {
 		return SmartError(err)
 	}
@@ -66,7 +66,7 @@ func profilesPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("No name provided"))
 	}
 
-	_, profile, _ := d.db.ProfileGet(req.Name)
+	_, profile, _ := d.cluster.ProfileGet(req.Name)
 	if profile != nil {
 		return BadRequest(fmt.Errorf("The profile already exists"))
 	}
@@ -90,7 +90,7 @@ func profilesPost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Update DB entry
-	_, err = d.db.ProfileCreate(req.Name, req.Description, req.Config, req.Devices)
+	_, err = d.cluster.ProfileCreate(req.Name, req.Description, req.Config, req.Devices)
 	if err != nil {
 		return SmartError(
 			fmt.Errorf("Error inserting %s into database: %s", req.Name, err))
@@ -105,12 +105,12 @@ var profilesCmd = Command{
 	post: profilesPost}
 
 func doProfileGet(s *state.State, name string) (*api.Profile, error) {
-	_, profile, err := s.Node.ProfileGet(name)
+	_, profile, err := s.Cluster.ProfileGet(name)
 	if err != nil {
 		return nil, err
 	}
 
-	cts, err := s.Node.ProfileContainersGet(name)
+	cts, err := s.Cluster.ProfileContainersGet(name)
 	if err != nil {
 		return nil, err
 	}
@@ -139,7 +139,7 @@ func profileGet(d *Daemon, r *http.Request) Response {
 func getContainersWithProfile(s *state.State, profile string) []container {
 	results := []container{}
 
-	output, err := s.Node.ProfileContainersGet(profile)
+	output, err := s.Cluster.ProfileContainersGet(profile)
 	if err != nil {
 		return results
 	}
@@ -159,7 +159,7 @@ func getContainersWithProfile(s *state.State, profile string) []container {
 func profilePut(d *Daemon, r *http.Request) Response {
 	// Get the profile
 	name := mux.Vars(r)["name"]
-	id, profile, err := d.db.ProfileGet(name)
+	id, profile, err := d.cluster.ProfileGet(name)
 	if err != nil {
 		return SmartError(fmt.Errorf("Failed to retrieve profile='%s'", name))
 	}
@@ -182,7 +182,7 @@ func profilePut(d *Daemon, r *http.Request) Response {
 func profilePatch(d *Daemon, r *http.Request) Response {
 	// Get the profile
 	name := mux.Vars(r)["name"]
-	id, profile, err := d.db.ProfileGet(name)
+	id, profile, err := d.cluster.ProfileGet(name)
 	if err != nil {
 		return SmartError(fmt.Errorf("Failed to retrieve profile='%s'", name))
 	}
@@ -260,7 +260,7 @@ func profilePost(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check that the name isn't already in use
-	id, _, _ := d.db.ProfileGet(req.Name)
+	id, _, _ := d.cluster.ProfileGet(req.Name)
 	if id > 0 {
 		return Conflict
 	}
@@ -273,7 +273,7 @@ func profilePost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Invalid profile name '%s'", req.Name))
 	}
 
-	err := d.db.ProfileUpdate(name, req.Name)
+	err := d.cluster.ProfileUpdate(name, req.Name)
 	if err != nil {
 		return SmartError(err)
 	}
@@ -295,7 +295,7 @@ func profileDelete(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("Profile is currently in use"))
 	}
 
-	err = d.db.ProfileDelete(name)
+	err = d.cluster.ProfileDelete(name)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/profiles_test.go b/lxd/profiles_test.go
index 2f864cee4..ab2b60b6a 100644
--- a/lxd/profiles_test.go
+++ b/lxd/profiles_test.go
@@ -1,53 +1,39 @@
 package main
 
 import (
-	"database/sql"
-	"io/ioutil"
-	"os"
 	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
 )
 
 func Test_removing_a_profile_deletes_associated_configuration_entries(t *testing.T) {
-	var db *sql.DB
-	var err error
-
-	d := DefaultDaemon()
-	d.os.VarDir, err = ioutil.TempDir("", "lxd-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(d.os.VarDir)
-
-	_, err = initializeDbObject(d)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	db = d.db.DB()
+	cluster, cleanup := db.NewTestCluster(t)
+	defer cleanup()
+	db := cluster.DB()
 
 	// Insert a container and a related profile. Dont't forget that the profile
 	// we insert is profile ID 2 (there is a default profile already).
 	statements := `
-    INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1);
+    INSERT INTO containers (node_id, name, architecture, type) VALUES (1, 'thename', 1, 1);
     INSERT INTO profiles (name) VALUES ('theprofile');
     INSERT INTO containers_profiles (container_id, profile_id) VALUES (1, 2);
     INSERT INTO profiles_devices (name, profile_id) VALUES ('somename', 2);
     INSERT INTO profiles_config (key, value, profile_id) VALUES ('thekey', 'thevalue', 2);
     INSERT INTO profiles_devices_config (profile_device_id, key, value) VALUES (1, 'something', 'boring');`
 
-	_, err = db.Exec(statements)
+	_, err := db.Exec(statements)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	// Delete the profile we just created with dbapi.ProfileDelete
-	err = d.db.ProfileDelete("theprofile")
+	err = cluster.ProfileDelete("theprofile")
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	// Make sure there are 0 profiles_devices entries left.
-	devices, err := d.db.Devices("theprofile", true)
+	devices, err := cluster.Devices("theprofile", true)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -56,7 +42,7 @@ func Test_removing_a_profile_deletes_associated_configuration_entries(t *testing
 	}
 
 	// Make sure there are 0 profiles_config entries left.
-	config, err := d.db.ProfileConfig("theprofile")
+	config, err := cluster.ProfileConfig("theprofile")
 	if err == nil {
 		t.Fatal("found the profile!")
 	}
diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index d43e17bc2..1980c9768 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -38,7 +38,7 @@ func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req
 			// Check what profile the device comes from
 			profiles := container.Profiles()
 			for i := len(profiles) - 1; i >= 0; i-- {
-				_, profile, err := d.db.ProfileGet(profiles[i])
+				_, profile, err := d.cluster.ProfileGet(profiles[i])
 				if err != nil {
 					return SmartError(err)
 				}
@@ -60,7 +60,7 @@ func doProfileUpdate(d *Daemon, name string, id int64, profile *api.Profile, req
 	}
 
 	// Update the database
-	tx, err := d.db.Begin()
+	tx, err := d.cluster.Begin()
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index 52878c689..08681d072 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -684,7 +684,7 @@ func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) {
 func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
 	results := []string{}
 
-	cNames, err := s.Node.ContainersList(db.CTypeRegular)
+	cNames, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return results, err
 	}
@@ -702,7 +702,7 @@ func storageLVMGetThinPoolUsers(s *state.State) ([]string, error) {
 		}
 	}
 
-	imageNames, err := s.Node.ImagesGet(false)
+	imageNames, err := s.Cluster.ImagesGet(false)
 	if err != nil {
 		return results, err
 	}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index bed199461..a0e384ab5 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -220,7 +220,7 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 	}
 
 	// Check if the storage pool is still referenced in any profiles.
-	profiles, err := profilesUsingPoolGetNames(d.db, poolName)
+	profiles, err := profilesUsingPoolGetNames(d.cluster, poolName)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index 3eb356013..e520a96e6 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -88,7 +88,7 @@ func storagePoolUsedByGet(state *state.State, poolID int64, poolName string) ([]
 	}
 
 	// Retrieve all profiles that exist on this storage pool.
-	profiles, err := profilesUsingPoolGetNames(state.Node, poolName)
+	profiles, err := profilesUsingPoolGetNames(state.Cluster, poolName)
 
 	if err != nil {
 		return []string{}, err
@@ -128,7 +128,7 @@ func storagePoolUsedByGet(state *state.State, poolID int64, poolName string) ([]
 	return poolUsedBy, err
 }
 
-func profilesUsingPoolGetNames(db *db.Node, poolName string) ([]string, error) {
+func profilesUsingPoolGetNames(db *db.Cluster, poolName string) ([]string, error) {
 	usedBy := []string{}
 
 	profiles, err := db.Profiles()
diff --git a/lxd/storage_volumes_utils.go b/lxd/storage_volumes_utils.go
index ef691645a..2cfe05647 100644
--- a/lxd/storage_volumes_utils.go
+++ b/lxd/storage_volumes_utils.go
@@ -172,7 +172,7 @@ func storagePoolVolumeUpdate(state *state.State, poolName string, volumeName str
 
 func storagePoolVolumeUsedByContainersGet(s *state.State, volumeName string,
 	volumeTypeName string) ([]string, error) {
-	cts, err := s.Node.ContainersList(db.CTypeRegular)
+	cts, err := s.Cluster.ContainersList(db.CTypeRegular)
 	if err != nil {
 		return []string{}, err
 	}
@@ -233,7 +233,7 @@ func storagePoolVolumeUsedByGet(s *state.State, volumeName string, volumeTypeNam
 			fmt.Sprintf("/%s/containers/%s", version.APIVersion, ct))
 	}
 
-	profiles, err := profilesUsingPoolVolumeGetNames(s.Node, volumeName, volumeTypeName)
+	profiles, err := profilesUsingPoolVolumeGetNames(s.Cluster, volumeName, volumeTypeName)
 	if err != nil {
 		return []string{}, err
 	}
@@ -249,7 +249,7 @@ func storagePoolVolumeUsedByGet(s *state.State, volumeName string, volumeTypeNam
 	return volumeUsedBy, nil
 }
 
-func profilesUsingPoolVolumeGetNames(db *db.Node, volumeName string, volumeType string) ([]string, error) {
+func profilesUsingPoolVolumeGetNames(db *db.Cluster, volumeName string, volumeType string) ([]string, error) {
 	usedBy := []string{}
 
 	profiles, err := db.Profiles()
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index 511446a53..5179ac429 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -181,26 +181,25 @@ kill_lxd() {
         check_empty "${daemon_dir}/shmounts/"
         check_empty "${daemon_dir}/snapshots/"
 
-        echo "==> Checking for leftover DB entries"
-        check_empty_table "${daemon_dir}/lxd.db" "containers"
-        check_empty_table "${daemon_dir}/lxd.db" "containers_config"
-        check_empty_table "${daemon_dir}/lxd.db" "containers_devices"
-        check_empty_table "${daemon_dir}/lxd.db" "containers_devices_config"
-        check_empty_table "${daemon_dir}/lxd.db" "containers_profiles"
-        check_empty_table "${daemon_dir}/lxd.db" "images"
-        check_empty_table "${daemon_dir}/lxd.db" "images_aliases"
-        check_empty_table "${daemon_dir}/lxd.db" "images_properties"
-        check_empty_table "${daemon_dir}/lxd.db" "images_source"
-        check_empty_table "${daemon_dir}/lxd.db" "profiles"
-        check_empty_table "${daemon_dir}/lxd.db" "profiles_config"
-        check_empty_table "${daemon_dir}/lxd.db" "profiles_devices"
-        check_empty_table "${daemon_dir}/lxd.db" "profiles_devices_config"
-
         echo "==> Checking for leftover cluster DB entries"
 	# FIXME: we should not use the command line sqlite client, since it's
         #        not compatible with dqlite
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers"
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers_devices"
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers_devices_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "containers_profiles"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images_aliases"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images_properties"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images_source"
+        check_empty_table "${daemon_dir}/raft/db.bin" "images_nodes"
         check_empty_table "${daemon_dir}/raft/db.bin" "networks"
         check_empty_table "${daemon_dir}/raft/db.bin" "networks_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "profiles"
+        check_empty_table "${daemon_dir}/raft/db.bin" "profiles_config"
+        check_empty_table "${daemon_dir}/raft/db.bin" "profiles_devices"
+        check_empty_table "${daemon_dir}/raft/db.bin" "profiles_devices_config"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools_config"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes"
diff --git a/test/suites/backup.sh b/test/suites/backup.sh
index 84b304836..0b14a0b83 100644
--- a/test/suites/backup.sh
+++ b/test/suites/backup.sh
@@ -17,7 +17,7 @@ test_container_import() {
     ! lxd import ctImport
     lxd import ctImport --force
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     lxd import ctImport --force
     lxc start ctImport
@@ -39,7 +39,7 @@ test_container_import() {
     lxc start ctImport
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
@@ -51,7 +51,7 @@ test_container_import() {
     lxc start ctImport
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
     ! lxd import ctImport
     lxd import ctImport --force
     lxc info ctImport | grep snap0
@@ -63,8 +63,8 @@ test_container_import() {
     lxc start ctImport
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
@@ -77,8 +77,8 @@ test_container_import() {
     lxc start ctImport
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport/snap0'"
     lxd import ctImport
@@ -98,8 +98,8 @@ test_container_import() {
     fi
     pid=$(lxc info ctImport | grep ^Pid | awk '{print $2}')
     kill -9 "${pid}"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
-    sqlite3 "${LXD_DIR}/lxd.db" "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport'"
+    lxd sql "PRAGMA foreign_keys=ON; DELETE FROM containers WHERE name='ctImport/snap0'"
     lxd sql "PRAGMA foreign_keys=ON; DELETE FROM storage_volumes WHERE name='ctImport'"
     ! lxd import ctImport
     lxd import ctImport --force
diff --git a/test/suites/database_update.sh b/test/suites/database_update.sh
index b8b46301f..872308f20 100644
--- a/test/suites/database_update.sh
+++ b/test/suites/database_update.sh
@@ -9,12 +9,12 @@ test_database_update(){
   spawn_lxd "${LXD_MIGRATE_DIR}" true
 
   # Assert there are enough tables.
-  expected_tables=17
+  expected_tables=7
   tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
   [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
 
   # There should be 12 "ON DELETE CASCADE" occurrences
-  expected_cascades=11
+  expected_cascades=3
   cascades=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "ON DELETE CASCADE")
   [ "${cascades}" -eq "${expected_cascades}" ] || { echo "FAIL: Wrong number of ON DELETE CASCADE foreign keys. Found: ${cascades}, exected: ${expected_cascades}"; false; }
 
diff --git a/test/suites/image.sh b/test/suites/image.sh
index e36bb149e..fbb44ab8c 100644
--- a/test/suites/image.sh
+++ b/test/suites/image.sh
@@ -27,7 +27,7 @@ test_image_expiry() {
   lxc_remote image list l2: | grep -q "${fpbrief}"
 
   # Override the upload date
-  sqlite3 "${LXD2_DIR}/lxd.db" "UPDATE images SET last_use_date='$(date --rfc-3339=seconds -u -d "2 days ago")' WHERE fingerprint='${fp}'"
+  LXD_DIR="$LXD2_DIR" lxd sql "UPDATE images SET last_use_date='$(date --rfc-3339=seconds -u -d "2 days ago")' WHERE fingerprint='${fp}'" | grep -q "Rows affected: 1"
 
   # Trigger the expiry
   lxc_remote config set l2: images.remote_cache_expiry 1
diff --git a/test/suites/profiling.sh b/test/suites/profiling.sh
index 769a5580e..63859bd95 100644
--- a/test/suites/profiling.sh
+++ b/test/suites/profiling.sh
@@ -4,7 +4,8 @@ test_cpu_profiling() {
   spawn_lxd "${LXD3_DIR}" false --cpuprofile "${LXD3_DIR}/cpu.out"
   lxdpid=$(cat "${LXD3_DIR}/lxd.pid")
   kill -TERM "${lxdpid}"
-  wait "${lxdpid}" || true
+  wait "${lxdpid}"
+     #|| true
   export PPROF_TMPDIR="${TEST_DIR}/pprof"
   echo top5 | go tool pprof "$(which lxd)" "${LXD3_DIR}/cpu.out"
   echo ""

From 3c3224ba4464f028da6be0b51ceec64c5179ac21 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 26 Oct 2017 18:17:54 +0000
Subject: [PATCH 064/116] Convert api.Cluster.TargetCert to string

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go    | 2 +-
 client/lxd_cluster.go   | 2 +-
 lxd/api_cluster.go      | 2 +-
 lxd/api_cluster_test.go | 6 +++---
 lxd/main_init.go        | 2 +-
 shared/api/cluster.go   | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index eaca03a79..277f576f8 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -164,7 +164,7 @@ type ContainerServer interface {
 	GetCluster(password string) (cluster *api.Cluster, err error)
 	BootstrapCluster(name string) (op *Operation, err error)
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
-	JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (op *Operation, err error)
+	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 7d153cbb5..20a107b8b 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -55,7 +55,7 @@ func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, a
 }
 
 // JoinCluster requests to join an existing cluster.
-func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword string, targetCert []byte, name string) (*Operation, error) {
+func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword, targetCert, name string) (*Operation, error) {
 	cluster := api.ClusterPost{
 		TargetAddress:  targetAddress,
 		TargetPassword: targetPassword,
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 66641ef50..7d9d624c6 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -173,7 +173,7 @@ func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
 		}
 
 		// Update our TLS configuration using the returned cluster certificate.
-		err = util.WriteCert(d.os.VarDir, "cluster", req.TargetCert, info.PrivateKey, req.TargetCA)
+		err = util.WriteCert(d.os.VarDir, "cluster", []byte(req.TargetCert), info.PrivateKey, req.TargetCA)
 		if err != nil {
 			return errors.Wrap(err, "failed to save cluster certificate")
 		}
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 02f5e7fc4..55ba594ac 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -48,7 +48,7 @@ func TestCluster_Join(t *testing.T) {
 
 	// Make the second node join the cluster.
 	address := daemons[0].endpoints.NetworkAddress()
-	cert := daemons[0].endpoints.NetworkPublicKey()
+	cert := string(daemons[0].endpoints.NetworkPublicKey())
 	client = f.ClientUnix(daemons[1])
 	op, err = client.JoinCluster(address, "sekret", cert, "rusp")
 	require.NoError(t, err)
@@ -102,7 +102,7 @@ func TestCluster_JoinWrongTrustPassword(t *testing.T) {
 
 	// Make the second node join the cluster.
 	address := daemons[0].endpoints.NetworkAddress()
-	cert := daemons[0].endpoints.NetworkPublicKey()
+	cert := string(daemons[0].endpoints.NetworkPublicKey())
 	client = f.ClientUnix(daemons[1])
 	op, err = client.JoinCluster(address, "noop", cert, "rusp")
 	require.NoError(t, err)
@@ -160,7 +160,7 @@ func (f *clusterFixture) FormCluster(daemons []*Daemon) {
 
 	// Make the other nodes join the cluster.
 	address := daemons[0].endpoints.NetworkAddress()
-	cert := daemons[0].endpoints.NetworkPublicKey()
+	cert := string(daemons[0].endpoints.NetworkPublicKey())
 	for i, daemon := range daemons[1:] {
 		client = f.ClientUnix(daemon)
 		op, err := client.JoinCluster(address, "sekret", cert, fmt.Sprintf("rusp-%d", i))
diff --git a/lxd/main_init.go b/lxd/main_init.go
index 686bc9198..a48dafa71 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -262,7 +262,7 @@ func (cmd *CmdInit) fillDataWithClustering(data *cmdInitData, clustering *cmdIni
 	}
 	data.Cluster.Name = clustering.Name
 	data.Cluster.TargetAddress = clustering.TargetAddress
-	data.Cluster.TargetCert = clustering.TargetCert
+	data.Cluster.TargetCert = string(clustering.TargetCert)
 	data.Cluster.TargetPassword = clustering.TargetPassword
 }
 
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 045411d64..61339f650 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -16,7 +16,7 @@ type ClusterPost struct {
 	Schema         int    `json:"schema" yaml:"schema"`
 	API            int    `json:"api" yaml:"api"`
 	TargetAddress  string `json:"target_address" yaml:"target_address"`
-	TargetCert     []byte `json:"target_cert" yaml:"target_cert"`
+	TargetCert     string `json:"target_cert" yaml:"target_cert"`
 	TargetCA       []byte `json:"target_ca" yaml:"target_ca"`
 	TargetPassword string `json:"target_password" yaml:"target_password"`
 }

From ce6b896ab06f383b2c633e338952a97458cdd6df Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 27 Oct 2017 12:42:37 +0000
Subject: [PATCH 065/116] Don't block on failed db queries when shutting down

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/connection.go   | 17 ++++++++++------
 lxd/cluster/gateway.go | 55 ++++++++++++++++++++++++++++++++++++++++----------
 lxd/daemon.go          | 21 +++++++++++++------
 lxd/main_daemon.go     |  1 +
 lxd/main_shutdown.go   |  5 ++++-
 5 files changed, 75 insertions(+), 24 deletions(-)

diff --git a/client/connection.go b/client/connection.go
index c2e830cd9..ca17f8869 100644
--- a/client/connection.go
+++ b/client/connection.go
@@ -45,6 +45,9 @@ type ConnectionArgs struct {
 
 	// Cookie jar
 	CookieJar http.CookieJar
+
+	// Skip automatic GetServer request upon connection
+	SkipGetServer bool
 }
 
 // ConnectLXD lets you connect to a remote LXD daemon over HTTPs.
@@ -97,13 +100,15 @@ func ConnectLXDUnix(path string, args *ConnectionArgs) (ContainerServer, error)
 	server.http = httpClient
 
 	// Test the connection and seed the server information
-	serverStatus, _, err := server.GetServer()
-	if err != nil {
-		return nil, err
-	}
+	if !args.SkipGetServer {
+		serverStatus, _, err := server.GetServer()
+		if err != nil {
+			return nil, err
+		}
 
-	// Record the server certificate
-	server.httpCertificate = serverStatus.Environment.Certificate
+		// Record the server certificate
+		server.httpCertificate = serverStatus.Environment.Certificate
+	}
 
 	return &server, nil
 }
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 243ec27b4..64c891b02 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -31,10 +31,13 @@ import (
 // HandlerFuncs method returns and to access the dqlite cluster using the gRPC
 // dialer returned by the Dialer method.
 func NewGateway(db *db.Node, cert *shared.CertInfo, latency float64) (*Gateway, error) {
+	ctx, cancel := context.WithCancel(context.Background())
 	gateway := &Gateway{
 		db:      db,
 		cert:    cert,
 		latency: latency,
+		ctx:     ctx,
+		cancel:  cancel,
 	}
 
 	err := gateway.init()
@@ -69,6 +72,11 @@ type Gateway struct {
 	// database, to minimize the difference between code paths in
 	// clustering and non-clustering modes.
 	memoryDial func() (*grpc.ClientConn, error)
+
+	// Used when shutting down the daemon to cancel any ongoing gRPC
+	// dialing attempt.
+	ctx    context.Context
+	cancel context.CancelFunc
 }
 
 // HandlerFuncs returns the HTTP handlers that should be added to the REST API
@@ -146,10 +154,11 @@ func (g *Gateway) Dialer() grpcsql.Dialer {
 			return g.memoryDial()
 		}
 
-		// FIXME: timeout should be configurable
+		// TODO: should the timeout be configurable?
+		ctx, cancel := context.WithTimeout(g.ctx, 5*time.Second)
+		defer cancel()
 		var err error
-		remaining := 10 * time.Second
-		for remaining > 0 {
+		for {
 			// Network connection.
 			addresses, dbErr := g.cachedRaftNodes()
 			if dbErr != nil {
@@ -158,19 +167,34 @@ func (g *Gateway) Dialer() grpcsql.Dialer {
 
 			for _, address := range addresses {
 				var conn *grpc.ClientConn
-				conn, err = grpcNetworkDial(address, g.cert, time.Second)
+				conn, err = grpcNetworkDial(g.ctx, address, g.cert)
 				if err == nil {
 					return conn, nil
 				}
 				logger.Debugf("Failed to establish gRPC connection with %s: %v", address, err)
 			}
-			time.Sleep(250 * time.Millisecond)
-			remaining -= 250 * time.Millisecond
+			if ctx.Err() != nil {
+				return nil, ctx.Err()
+			}
+			select {
+			case <-time.After(250 * time.Millisecond):
+				continue
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			}
 		}
-		return nil, err
 	}
 }
 
+// Kill is an API that the daemon calls before it actually shuts down and calls
+// Shutdown(). It will abort any ongoing or new attempt to establish a SQL gRPC
+// connection with the dialer (typically for running some pre-shutdown
+// queries).
+func (g *Gateway) Kill() {
+	logger.Debug("Cancel ongoing or future gRPC connection attempts")
+	g.cancel()
+}
+
 // Shutdown this gateway, stopping the gRPC server and possibly the raft factory.
 func (g *Gateway) Shutdown() error {
 	if g.server != nil {
@@ -276,16 +300,27 @@ func (g *Gateway) cachedRaftNodes() ([]string, error) {
 	return addresses, nil
 }
 
-func grpcNetworkDial(addr string, cert *shared.CertInfo, t time.Duration) (*grpc.ClientConn, error) {
+func grpcNetworkDial(ctx context.Context, addr string, cert *shared.CertInfo) (*grpc.ClientConn, error) {
 	config, err := tlsClientConfig(cert)
 	if err != nil {
 		return nil, err
 	}
 
+	// The whole attempt should not take more than a second. If the context
+	// gets cancelled, calling code will typically try against another
+	// database node, in round robin.
+	ctx, cancel := context.WithTimeout(ctx, time.Second)
+	defer cancel()
+
 	// Make a probe HEAD request to check if the target node is the leader.
 	url := fmt.Sprintf("https://%s%s", addr, grpcEndpoint)
+	request, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	request = request.WithContext(ctx)
 	client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
-	response, err := client.Head(url)
+	response, err := client.Do(request)
 	if err != nil {
 		return nil, err
 	}
@@ -293,8 +328,6 @@ func grpcNetworkDial(addr string, cert *shared.CertInfo, t time.Duration) (*grpc
 		return nil, fmt.Errorf(response.Status)
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), t)
-	defer cancel()
 	options := []grpc.DialOption{
 		grpc.WithTransportCredentials(credentials.NewTLS(config)),
 	}
diff --git a/lxd/daemon.go b/lxd/daemon.go
index f4f9caec2..1f7fc625b 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -409,12 +409,6 @@ func (d *Daemon) init() error {
 		return errors.Wrap(err, "failed to fetch node address")
 	}
 
-	/* Open the cluster database */
-	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
-	if err != nil {
-		return errors.Wrap(err, "failed to open cluster database")
-	}
-
 	/* Setup the web server */
 	config := &endpoints.Config{
 		Dir:                  d.os.VarDir,
@@ -429,6 +423,12 @@ func (d *Daemon) init() error {
 		return err
 	}
 
+	/* Open the cluster database */
+	d.cluster, err = db.OpenCluster("db.bin", d.gateway.Dialer(), address)
+	if err != nil {
+		return errors.Wrap(err, "failed to open cluster database")
+	}
+
 	/* Migrate the node local data to the cluster database, if needed */
 	if dump != nil {
 		logger.Infof("Migrating data from lxd.db to db.bin")
@@ -559,6 +559,15 @@ func (d *Daemon) numRunningContainers() (int, error) {
 	return count, nil
 }
 
+// Kill signals the daemon that we want to shutdown, and that any work
+// initiated from this point (e.g. database queries over gRPC) should not be
+// retried in case of failure.
+func (d *Daemon) Kill() {
+	if d.gateway != nil {
+		d.gateway.Kill()
+	}
+}
+
 // Stop stops the shared daemon.
 func (d *Daemon) Stop() error {
 	errs := []error{}
diff --git a/lxd/main_daemon.go b/lxd/main_daemon.go
index 7b9d84372..a4520315e 100644
--- a/lxd/main_daemon.go
+++ b/lxd/main_daemon.go
@@ -66,6 +66,7 @@ func cmdDaemon(args *Args) error {
 
 	case <-d.shutdownChan:
 		logger.Infof("Asked to shutdown by API, shutting down containers.")
+		d.Kill()
 		containersShutdown(s)
 		networkShutdown(s)
 	}
diff --git a/lxd/main_shutdown.go b/lxd/main_shutdown.go
index 00654ff64..675ad647c 100644
--- a/lxd/main_shutdown.go
+++ b/lxd/main_shutdown.go
@@ -8,7 +8,10 @@ import (
 )
 
 func cmdShutdown(args *Args) error {
-	c, err := lxd.ConnectLXDUnix("", nil)
+	connArgs := &lxd.ConnectionArgs{
+		SkipGetServer: true,
+	}
+	c, err := lxd.ConnectLXDUnix("", connArgs)
 	if err != nil {
 		return err
 	}

From 5ea9f6a2b6453e8b165331c16a627e2f78fa00bf Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 26 Oct 2017 18:15:23 +0000
Subject: [PATCH 066/116] Basic clustering integration tests

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/includes/clustering.sh | 72 +++++++++++++++++++++++++++++++++++++++++++
 test/includes/lxd.sh        | 36 ++++++++++++++--------
 test/main.sh                |  3 ++
 test/suites/clustering.sh   | 75 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 174 insertions(+), 12 deletions(-)
 create mode 100644 test/includes/clustering.sh
 create mode 100644 test/suites/clustering.sh

diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
new file mode 100644
index 000000000..6ffda750c
--- /dev/null
+++ b/test/includes/clustering.sh
@@ -0,0 +1,72 @@
+# Test helper for clustering
+
+setup_clustering_bridge() {
+  name="br$$"
+
+  echo "==> Setup clustering bridge ${name}"
+
+  brctl addbr "${name}"
+  ip addr add 10.1.1.1/16 dev "${name}"
+  ip link set dev "${name}" up
+
+  iptables -t nat -A POSTROUTING -s 10.1.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
+  echo 1 > /proc/sys/net/ipv4/ip_forward
+}
+
+teardown_clustering_bridge() {
+  name="br$$"
+
+  if brctl show | grep -q "${name}" ; then
+      echo "==> Teardown clustering bridge ${name}"
+      echo 0 > /proc/sys/net/ipv4/ip_forward
+      iptables -t nat -D POSTROUTING -s 10.1.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
+      ip link set dev "${name}" down
+      ip addr del 10.1.1.1/16 dev "${name}"
+      brctl delbr "${name}"
+  fi
+}
+
+setup_clustering_netns() {
+  id="${1}"
+  shift
+
+  prefix="lxd$$"
+  ns="${prefix}${id}"
+
+  echo "==> Setup clustering netns ${ns}"
+
+  ip netns add "${ns}"
+
+  veth1="v${ns}1"
+  veth2="v${ns}2"
+
+  ip link add "${veth1}" type veth peer name "${veth2}"
+  ip link set "${veth2}" netns "${ns}"
+
+  bridge="br$$"
+  brctl addif "${bridge}" "${veth1}"
+
+  ip link set "${veth1}" up
+
+  ip netns exec "${ns}" ip link set dev lo up
+  ip netns exec "${ns}" ip link set dev "${veth2}" name eth0
+  ip netns exec "${ns}" ip link set eth0 up
+  ip netns exec "${ns}" ip addr add "10.1.1.10${id}/16" dev eth0
+  ip netns exec "${ns}" ip route add default via 10.1.1.1
+}
+
+teardown_clustering_netns() {
+  prefix="lxd$$"
+  bridge="br$$"
+  for ns in $(ip netns | grep "${prefix}" | cut -f 1 -d " ") ; do
+      echo "==> Teardown clustering netns ${ns}"
+      veth1="v${ns}1"
+      veth2="v${ns}2"
+      ip netns exec "${ns}" ip link set eth0 down
+      ip netns exec "${ns}" ip link set lo down
+      ip link set "${veth1}" down
+      brctl delif "${bridge}" "${veth1}"
+      ip link delete "${veth1}" type veth
+      ip netns delete "${ns}"
+  done
+}
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index 5179ac429..a1b553788 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -40,7 +40,11 @@ spawn_lxd() {
     echo "==> Spawning lxd in ${lxddir}"
     # shellcheck disable=SC2086
 
-    LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    if [ "${LXD_NETNS}" = "" ]; then
+	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    else
+	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" ip netns exec "${LXD_NETNS}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+    fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
     # shellcheck disable=SC2153
@@ -50,15 +54,17 @@ spawn_lxd() {
     echo "==> Confirming lxd is responsive"
     LXD_DIR="${lxddir}" lxd waitready --timeout=300
 
-    echo "==> Binding to network"
-    # shellcheck disable=SC2034
-    for i in $(seq 10); do
-        addr="127.0.0.1:$(local_tcp_port)"
-        LXD_DIR="${lxddir}" lxc config set core.https_address "${addr}" || continue
-        echo "${addr}" > "${lxddir}/lxd.addr"
-        echo "==> Bound to ${addr}"
-        break
-    done
+    if [ "${LXD_NETNS}" = "" ]; then
+	echo "==> Binding to network"
+	# shellcheck disable=SC2034
+	for i in $(seq 10); do
+            addr="127.0.0.1:$(local_tcp_port)"
+            LXD_DIR="${lxddir}" lxc config set core.https_address "${addr}" || continue
+            echo "${addr}" > "${lxddir}/lxd.addr"
+            echo "==> Bound to ${addr}"
+            break
+	done
+    fi
 
     echo "==> Setting trust password"
     LXD_DIR="${lxddir}" lxc config set core.trust_password foo
@@ -66,8 +72,10 @@ spawn_lxd() {
         set -x
     fi
 
-    echo "==> Setting up networking"
-    LXD_DIR="${lxddir}" lxc profile device add default eth0 nic nictype=p2p name=eth0
+    if [ "${LXD_NETNS}" = "" ]; then
+	echo "==> Setting up networking"
+	LXD_DIR="${lxddir}" lxc profile device add default eth0 nic nictype=p2p name=eth0
+    fi
 
     if [ "${storage}" = true ]; then
         echo "==> Configuring storage backend"
@@ -289,4 +297,8 @@ cleanup_lxds() {
     wipe "$test_dir"
 
     umount_loops "$test_dir"
+
+    # Cleanup clustering networking, if any
+    teardown_clustering_netns
+    teardown_clustering_bridge
 }
diff --git a/test/main.sh b/test/main.sh
index 43b4d9c44..39d818f3a 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -24,6 +24,9 @@ if [ -z "${LXD_BACKEND:-}" ]; then
     LXD_BACKEND="dir"
 fi
 
+# shellcheck disable=SC2034
+LXD_NETNS=""
+
 import_subdir_files() {
     test "$1"
     # shellcheck disable=SC2039
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
new file mode 100644
index 000000000..2197ea439
--- /dev/null
+++ b/test/suites/clustering.sh
@@ -0,0 +1,75 @@
+test_clustering() {
+  setup_clustering_bridge
+  prefix="lxd$$"
+
+  setup_clustering_netns 1
+  LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_ONE_DIR}"
+  ns="${prefix}1"
+  LXD_NETNS="${ns}" spawn_lxd "${LXD_ONE_DIR}" false
+  (
+    set -e
+    # shellcheck disable=SC2034
+    LXD_DIR=${LXD_ONE_DIR}
+
+  cat <<EOF | lxd init --preseed
+config:
+  core.trust_password: sekret
+  core.https_address: 10.1.1.101:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: data
+  driver: dir
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: data
+      type: disk
+cluster:
+  name: one
+EOF
+  )
+
+  # Add a newline at the end of each line. YAML has weird rules.
+  cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
+
+  setup_clustering_netns 2
+  LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_TWO_DIR}"
+  ns="${prefix}2"
+  LXD_NETNS="${ns}" spawn_lxd "${LXD_TWO_DIR}" false
+  (
+    set -e
+    # shellcheck disable=SC2034
+    LXD_DIR=${LXD_TWO_DIR}
+
+  cat <<EOF | lxd init --preseed
+config:
+  core.https_address: 10.1.1.102:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: data
+  driver: dir
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: data
+      type: disk
+cluster:
+  name: two
+  target_address: 10.1.1.101:8443
+  target_password: sekret
+  target_cert: "$cert"
+EOF
+  )
+
+  LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
+  sleep 2
+  rm -f "${LXD_TWO_DIR}/unix.socket"
+  rm -f "${LXD_ONE_DIR}/unix.socket"
+}

From 829dbed5e5b49b6511b866c1266f8086f753b275 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 27 Oct 2017 13:20:27 +0000
Subject: [PATCH 067/116] Retry database interactions if raft leadership
 changed

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/containers_get.go |  2 +-
 lxd/db/db.go          | 44 +++++++++++++++++++++++++-------------------
 2 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 29ac485ac..376190b86 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -19,7 +19,7 @@ func containersGet(d *Daemon, r *http.Request) Response {
 		if err == nil {
 			return SyncResponse(true, result)
 		}
-		if !db.IsDbLockedError(err) {
+		if !db.IsRetriableError(err) {
 			logger.Debugf("DBERR: containersGet: error %q", err)
 			return SmartError(err)
 		}
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 0dd5e6c7e..59770699f 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -6,7 +6,7 @@ import (
 	"strings"
 	"time"
 
-	grpcsql "github.com/CanonicalLtd/go-grpc-sql"
+	"github.com/CanonicalLtd/go-grpc-sql"
 	"github.com/mattn/go-sqlite3"
 	"github.com/pkg/errors"
 
@@ -213,21 +213,15 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 
 	// FIXME: the retry loop should be configurable.
 	var err error
-	for i := 0; i < 10; i++ {
+	for i := 0; i < 20; i++ {
 		err = query.Transaction(c.db, func(tx *sql.Tx) error {
 			clusterTx.tx = tx
 			return f(clusterTx)
 		})
-		if err != nil {
-			// FIXME: we should bubble errors using errors.Wrap()
-			// instead, and check for sql.ErrBadConnection.
-			badConnection := strings.Contains(err.Error(), "bad connection")
-			leadershipLost := strings.Contains(err.Error(), "leadership lost")
-			if badConnection || leadershipLost {
-				logger.Debugf("Retry failed transaction")
-				time.Sleep(time.Second)
-				continue
-			}
+		if err != nil && IsRetriableError(err) {
+			logger.Debugf("Retry failed transaction")
+			time.Sleep(250 * time.Millisecond)
+			continue
 		}
 		break
 	}
@@ -277,7 +271,9 @@ func UpdateSchemasDotGo() error {
 	return nil
 }
 
-func IsDbLockedError(err error) bool {
+// IsRetriableError returns true if the given error might be transient and the
+// interaction can be safely retried.
+func IsRetriableError(err error) bool {
 	if err == nil {
 		return false
 	}
@@ -287,6 +283,16 @@ func IsDbLockedError(err error) bool {
 	if err.Error() == "database is locked" {
 		return true
 	}
+
+	// FIXME: we should bubble errors using errors.Wrap()
+	// instead, and check for err.Cause() == sql.ErrBadConnection.
+	if strings.Contains(err.Error(), "bad connection") {
+		return true
+	}
+	if strings.Contains(err.Error(), "leadership lost") {
+		return true
+	}
+
 	return false
 }
 
@@ -306,7 +312,7 @@ func begin(db *sql.DB) (*sql.Tx, error) {
 		if err == nil {
 			return tx, nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("DbBegin: error %q", err)
 			return nil, err
 		}
@@ -324,7 +330,7 @@ func TxCommit(tx *sql.Tx) error {
 		if err == nil {
 			return nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("Txcommit: error %q", err)
 			return err
 		}
@@ -345,7 +351,7 @@ func dbQueryRowScan(db *sql.DB, q string, args []interface{}, outargs []interfac
 		if isNoMatchError(err) {
 			return err
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			return err
 		}
 		time.Sleep(30 * time.Millisecond)
@@ -362,7 +368,7 @@ func dbQuery(db *sql.DB, q string, args ...interface{}) (*sql.Rows, error) {
 		if err == nil {
 			return result, nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("DbQuery: query %q error %q", q, err)
 			return nil, err
 		}
@@ -447,7 +453,7 @@ func queryScan(qi queryer, q string, inargs []interface{}, outfmt []interface{})
 		if err == nil {
 			return result, nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("DbQuery: query %q error %q", q, err)
 			return nil, err
 		}
@@ -465,7 +471,7 @@ func exec(db *sql.DB, q string, args ...interface{}) (sql.Result, error) {
 		if err == nil {
 			return result, nil
 		}
-		if !IsDbLockedError(err) {
+		if !IsRetriableError(err) {
 			logger.Debugf("DbExec: query %q error %q", q, err)
 			return nil, err
 		}

From 2923445f2e6291f1c24158e5de7fa3e94412a2d9 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 30 Oct 2017 09:35:21 +0000
Subject: [PATCH 068/116] Limit open db connections to 1, to match the former
 exclusive mode

The node-level database is opened with _txlock=exclusive, which
doesn't quite work with dqlite. However limiting the number of open
connections to 1 has effectively the same semantics.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/db.go     | 1 +
 lxd/db/images.go | 5 +++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/lxd/db/db.go b/lxd/db/db.go
index 59770699f..97d1f9173 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -170,6 +170,7 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 	cluster := &Cluster{
 		db: db,
 	}
+	db.SetMaxOpenConns(1)
 
 	// Figure out the ID of this node.
 	err = cluster.Transaction(func(tx *ClusterTx) error {
diff --git a/lxd/db/images.go b/lxd/db/images.go
index e69e8acb7..e71ef07ac 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -440,14 +440,15 @@ func (c *Cluster) ImageUpdate(id int, fname string, sz int64, public bool, autoU
 		return err
 	}
 
-	stmt, err = tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, ?, ?, ?)`)
+	stmt2, err := tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, ?, ?, ?)`)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
+	defer stmt2.Close()
 
 	for key, value := range properties {
-		_, err = stmt.Exec(id, 0, key, value)
+		_, err = stmt2.Exec(id, 0, key, value)
 		if err != nil {
 			tx.Rollback()
 			return err

From 2ce16da6276eafc9b1c8352e1f658b5b3cbb05cf Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 27 Oct 2017 13:21:25 +0000
Subject: [PATCH 069/116] Cluster notifications for network delete

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api.go                |  7 +++++++
 lxd/api_1.0.go            |  2 +-
 lxd/networks.go           | 41 +++++++++++++++++++++++++++++++++++------
 test/suites/clustering.sh | 29 +++++++++++++++++++++++++----
 4 files changed, 68 insertions(+), 11 deletions(-)

diff --git a/lxd/api.go b/lxd/api.go
index ba6285ce9..825874f1b 100644
--- a/lxd/api.go
+++ b/lxd/api.go
@@ -98,3 +98,10 @@ func setCORSHeaders(rw http.ResponseWriter, req *http.Request, config *cluster.C
 		rw.Header().Set("Access-Control-Allow-Credentials", "true")
 	}
 }
+
+// Return true if this is an API request coming from a cluster node that is
+// notifying us of some user-initiated API request that needs some action to be
+// taken on this node as well.
+func isClusterNotification(r *http.Request) bool {
+	return r.Header.Get("User-Agent") == "lxd-cluster-notifier"
+}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 27f123712..96d2631fb 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -181,7 +181,7 @@ func api10Put(d *Daemon, r *http.Request) Response {
 
 	// If this is a notification from a cluster node, just run the triggers
 	// for reacting to the values that changed.
-	if r.Header.Get("User-Agent") == "lxd-cluster-notifier" {
+	if isClusterNotification(r) {
 		changed := make(map[string]string)
 		for key, value := range req.Config {
 			changed[key] = value.(string)
diff --git a/lxd/networks.go b/lxd/networks.go
index 0b994187c..1f1c1b0d2 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -15,6 +15,8 @@ import (
 	"github.com/gorilla/mux"
 	log "github.com/lxc/lxd/shared/log15"
 
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/util"
@@ -237,8 +239,37 @@ func networkDelete(d *Daemon, r *http.Request) Response {
 	if err != nil {
 		return NotFound
 	}
+	if isClusterNotification(r) {
+		n.db = nil // We just want to delete the network from the system
+	} else {
+		// Sanity checks
+		if n.IsUsed() {
+			return BadRequest(fmt.Errorf("The network is currently in use"))
+		}
+	}
+
+	// If we're just handling a notification, we're done.
+	if n.db == nil {
+		return EmptySyncResponse
+	}
+
+	// Notify all other nodes. If any node is down, an error will be returned.
+	notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
+	if err != nil {
+		return SmartError(err)
+	}
+	err = notifier(func(client lxd.ContainerServer) error {
+		_, _, err := client.GetServer()
+		if err != nil {
+			return err
+		}
+		return client.DeleteNetwork(name)
+	})
+	if err != nil {
+		return SmartError(err)
+	}
 
-	// Attempt to delete the network
+	// Delete the network
 	err = n.Delete()
 	if err != nil {
 		return SmartError(err)
@@ -495,17 +526,15 @@ func (n *network) IsUsed() bool {
 }
 
 func (n *network) Delete() error {
-	// Sanity checks
-	if n.IsUsed() {
-		return fmt.Errorf("The network is currently in use")
-	}
-
 	// Bring the network down
 	if n.IsRunning() {
 		err := n.Stop()
 		if err != nil {
 			return err
 		}
+		if n.db == nil {
+			return nil
+		}
 	}
 
 	// Remove the network from the database
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 2197ea439..7e963ff1f 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -1,12 +1,13 @@
 test_clustering() {
   setup_clustering_bridge
   prefix="lxd$$"
+  bridge="${prefix}"
 
   setup_clustering_netns 1
   LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
   chmod +x "${LXD_ONE_DIR}"
-  ns="${prefix}1"
-  LXD_NETNS="${ns}" spawn_lxd "${LXD_ONE_DIR}" false
+  ns1="${prefix}1"
+  LXD_NETNS="${ns1}" spawn_lxd "${LXD_ONE_DIR}" false
   (
     set -e
     # shellcheck disable=SC2034
@@ -20,6 +21,12 @@ config:
 storage_pools:
 - name: data
   driver: dir
+networks:
+- name: $bridge
+  type: bridge
+  config:
+    ipv4.address: none
+    ipv6.address: none
 profiles:
 - name: default
   devices:
@@ -38,8 +45,8 @@ EOF
   setup_clustering_netns 2
   LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
   chmod +x "${LXD_TWO_DIR}"
-  ns="${prefix}2"
-  LXD_NETNS="${ns}" spawn_lxd "${LXD_TWO_DIR}" false
+  ns2="${prefix}2"
+  LXD_NETNS="${ns2}" spawn_lxd "${LXD_TWO_DIR}" false
   (
     set -e
     # shellcheck disable=SC2034
@@ -52,6 +59,12 @@ config:
 storage_pools:
 - name: data
   driver: dir
+networks:
+- name: $bridge
+  type: bridge
+  config:
+    ipv4.address: none
+    ipv6.address: none
 profiles:
 - name: default
   devices:
@@ -67,6 +80,14 @@ cluster:
 EOF
   )
 
+  # The preseeded network bridge exists on all nodes.
+  ip netns exec "${ns1}" ip link show "${bridge}" > /dev/null
+  ip netns exec "${ns2}" ip link show "${bridge}" > /dev/null
+
+  # The preseeded network can be deleted from any node, other nodes
+  # are notified.
+  LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From f132063487e4c1ec9dcfb90fbeed615f11e7e33b Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 30 Oct 2017 10:45:58 +0000
Subject: [PATCH 070/116] Add integration test exercising cluster config values
 changes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/suites/clustering.sh | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 7e963ff1f..5ef8c7b60 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -80,6 +80,11 @@ cluster:
 EOF
   )
 
+  # Configuration keys can be changed on any node.
+  LXD_DIR="${LXD_TWO_DIR}" lxc config set images.auto_update_interval 10
+  LXD_DIR="${LXD_ONE_DIR}" lxc info | grep -q 'images.auto_update_interval: "10"'
+  LXD_DIR="${LXD_TWO_DIR}" lxc info | grep -q 'images.auto_update_interval: "10"'
+
   # The preseeded network bridge exists on all nodes.
   ip netns exec "${ns1}" ip link show "${bridge}" > /dev/null
   ip netns exec "${ns2}" ip link show "${bridge}" > /dev/null

From ddfb4fdbee108c11483d5a1a2c3a5b9e3c988a54 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 31 Oct 2017 10:39:58 +0000
Subject: [PATCH 071/116] Make /internal/raft redirect to a known cluster node

In case the node handling the request is not a database node, redirect
to one of the database nodes it knows about. This makes it possible to
join the cluster also if the specified target node is not a database
node.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go      | 31 ++++++++++++++-
 test/includes/clustering.sh | 78 +++++++++++++++++++++++++++++++++++++
 test/main.sh                |  1 +
 test/suites/clustering.sh   | 94 +++++++++++++++------------------------------
 4 files changed, 139 insertions(+), 65 deletions(-)

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 64c891b02..342006939 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"net"
 	"net/http"
+	"net/url"
 	"strconv"
 	"time"
 
@@ -132,10 +133,38 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 		g.server.ServeHTTP(w, r)
 	}
 	raft := func(w http.ResponseWriter, r *http.Request) {
-		if g.raft == nil || g.raft.HandlerFunc() == nil {
+		// If we are not part of the raft cluster, reply with a
+		// redirect to one of the raft nodes that we know about.
+		if g.raft == nil {
+			var address string
+			err := g.db.Transaction(func(tx *db.NodeTx) error {
+				nodes, err := tx.RaftNodes()
+				if err != nil {
+					return err
+				}
+				address = nodes[0].Address
+				return nil
+			})
+			if err != nil {
+				http.Error(w, "500 failed to fetch raft nodes", http.StatusInternalServerError)
+				return
+			}
+			url := &url.URL{
+				Scheme:   "http",
+				Path:     r.URL.Path,
+				RawQuery: r.URL.RawQuery,
+				Host:     address,
+			}
+			http.Redirect(w, r, url.String(), http.StatusPermanentRedirect)
+			return
+		}
+
+		// If this node is not clustered return a 404.
+		if g.raft.HandlerFunc() == nil {
 			http.NotFound(w, r)
 			return
 		}
+
 		g.raft.HandlerFunc()(w, r)
 	}
 
diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index 6ffda750c..a87af60df 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -70,3 +70,81 @@ teardown_clustering_netns() {
       ip netns delete "${ns}"
   done
 }
+
+spawn_lxd_and_bootstrap_cluster() {
+  set -e
+  ns="${1}"
+  bridge="${2}"
+  LXD_DIR="${3}"
+  LXD_NETNS="${ns}" spawn_lxd "${LXD_DIR}" false
+  (
+    set -e
+
+    cat <<EOF | lxd init --preseed
+config:
+  core.trust_password: sekret
+  core.https_address: 10.1.1.101:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: data
+  driver: dir
+networks:
+- name: $bridge
+  type: bridge
+  config:
+    ipv4.address: none
+    ipv6.address: none
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: data
+      type: disk
+cluster:
+  name: node1
+EOF
+  )
+}
+
+spawn_lxd_and_join_cluster() {
+  set -e
+  ns="${1}"
+  bridge="${2}"
+  cert="${3}"
+  index="${4}"
+  target="${5}"
+  LXD_DIR="${6}"
+
+  LXD_NETNS="${ns}" spawn_lxd "${LXD_DIR}" false
+  (
+    set -e
+
+    cat <<EOF | lxd init --preseed
+config:
+  core.https_address: 10.1.1.10${index}:8443
+  images.auto_update_interval: 15
+storage_pools:
+- name: data
+  driver: dir
+networks:
+- name: $bridge
+  type: bridge
+  config:
+    ipv4.address: none
+    ipv6.address: none
+profiles:
+- name: default
+  devices:
+    root:
+      path: /
+      pool: data
+      type: disk
+cluster:
+  name: node${index}
+  target_address: 10.1.1.10${target}:8443
+  target_password: sekret
+  target_cert: "$cert"
+EOF
+  )
+}
diff --git a/test/main.sh b/test/main.sh
index 39d818f3a..0fe316193 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -193,6 +193,7 @@ run_test test_resources "resources"
 run_test test_kernel_limits "kernel limits"
 run_test test_macaroon_auth "macaroon authentication"
 run_test test_console "console"
+run_test test_clustering "clustering"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 5ef8c7b60..27812d889 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -7,78 +7,17 @@ test_clustering() {
   LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
   chmod +x "${LXD_ONE_DIR}"
   ns1="${prefix}1"
-  LXD_NETNS="${ns1}" spawn_lxd "${LXD_ONE_DIR}" false
-  (
-    set -e
-    # shellcheck disable=SC2034
-    LXD_DIR=${LXD_ONE_DIR}
-
-  cat <<EOF | lxd init --preseed
-config:
-  core.trust_password: sekret
-  core.https_address: 10.1.1.101:8443
-  images.auto_update_interval: 15
-storage_pools:
-- name: data
-  driver: dir
-networks:
-- name: $bridge
-  type: bridge
-  config:
-    ipv4.address: none
-    ipv6.address: none
-profiles:
-- name: default
-  devices:
-    root:
-      path: /
-      pool: data
-      type: disk
-cluster:
-  name: one
-EOF
-  )
+  spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}"
 
   # Add a newline at the end of each line. YAML has weird rules.
   cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
 
+  # Spawn a second node
   setup_clustering_netns 2
   LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
   chmod +x "${LXD_TWO_DIR}"
   ns2="${prefix}2"
-  LXD_NETNS="${ns2}" spawn_lxd "${LXD_TWO_DIR}" false
-  (
-    set -e
-    # shellcheck disable=SC2034
-    LXD_DIR=${LXD_TWO_DIR}
-
-  cat <<EOF | lxd init --preseed
-config:
-  core.https_address: 10.1.1.102:8443
-  images.auto_update_interval: 15
-storage_pools:
-- name: data
-  driver: dir
-networks:
-- name: $bridge
-  type: bridge
-  config:
-    ipv4.address: none
-    ipv6.address: none
-profiles:
-- name: default
-  devices:
-    root:
-      path: /
-      pool: data
-      type: disk
-cluster:
-  name: two
-  target_address: 10.1.1.101:8443
-  target_password: sekret
-  target_cert: "$cert"
-EOF
-  )
+  spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
 
   # Configuration keys can be changed on any node.
   LXD_DIR="${LXD_TWO_DIR}" lxc config set images.auto_update_interval 10
@@ -89,13 +28,40 @@ EOF
   ip netns exec "${ns1}" ip link show "${bridge}" > /dev/null
   ip netns exec "${ns2}" ip link show "${bridge}" > /dev/null
 
+  # Spawn a third node, using the non-leader node2 as join target.
+  setup_clustering_netns 3
+  LXD_THREE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_THREE_DIR}"
+  ns3="${prefix}3"
+  spawn_lxd_and_join_cluster "${ns3}" "${bridge}" "${cert}" 3 2 "${LXD_THREE_DIR}"
+
+  # Spawn a fourth node, this will be a non-database node.
+  setup_clustering_netns 4
+  LXD_FOUR_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_FOUR_DIR}"
+  ns4="${prefix}4"
+  spawn_lxd_and_join_cluster "${ns4}" "${bridge}" "${cert}" 4 1 "${LXD_FOUR_DIR}"
+
+  # Spawn a fifth node, using non-database node4 as join target.
+  setup_clustering_netns 5
+  LXD_FIVE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_FIVE_DIR}"
+  ns5="${prefix}5"
+  spawn_lxd_and_join_cluster "${ns5}" "${bridge}" "${cert}" 5 4 "${LXD_FIVE_DIR}"
+
   # The preseeded network can be deleted from any node, other nodes
   # are notified.
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
+  LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown
+  LXD_DIR="${LXD_FOUR_DIR}" lxd shutdown
+  LXD_DIR="${LXD_THREE_DIR}" lxd shutdown
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2
+  rm -f "${LXD_FIVE_DIR}/unix.socket"
+  rm -f "${LXD_FOUR_DIR}/unix.socket"
+  rm -f "${LXD_THREE_DIR}/unix.socket"
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
 }

From 9ea1ef96bcb8b791bfd83be76ffb768f6f89bc89 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 1 Nov 2017 10:47:22 +0000
Subject: [PATCH 072/116] Change bootstrap/join endpoint from /cluster to
 /cluster/nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/lxd_cluster.go |  6 +++---
 lxd/api_1.0.go        |  1 +
 lxd/api_cluster.go    | 29 +++++++++++++++++++++--------
 3 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 20a107b8b..e8baae3a5 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -27,7 +27,7 @@ func (r *ProtocolLXD) GetCluster(password string) (*api.Cluster, error) {
 // BootstrapCluster requests to bootstrap a new cluster.
 func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
 	cluster := api.ClusterPost{Name: name}
-	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
+	op, _, err := r.queryOperation("POST", "/cluster/nodes", cluster, "")
 	if err != nil {
 		return nil, err
 	}
@@ -45,7 +45,7 @@ func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, a
 		TargetPassword: targetPassword,
 	}
 	info := &api.ClusterNodeAccepted{}
-	_, err := r.queryStruct("POST", "/cluster", cluster, "", &info)
+	_, err := r.queryStruct("POST", "/cluster/nodes", cluster, "", &info)
 
 	if err != nil {
 		return nil, err
@@ -62,7 +62,7 @@ func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword, targetCert, nam
 		TargetCert:     targetCert,
 		Name:           name,
 	}
-	op, _, err := r.queryOperation("POST", "/cluster", cluster, "")
+	op, _, err := r.queryOperation("POST", "/cluster/nodes", cluster, "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 96d2631fb..3dcb9ddf3 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -60,6 +60,7 @@ var api10 = []Command{
 	storagePoolVolumeTypeCmd,
 	serverResourceCmd,
 	clusterCmd,
+	clusterNodesCmd,
 }
 
 func api10Get(d *Daemon, r *http.Request) Response {
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 7d9d624c6..0cf11e697 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -15,8 +15,10 @@ import (
 	"github.com/pkg/errors"
 )
 
-var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet, untrustedPost: true, post: clusterPost}
+var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet}
 
+// Return information about the cluster, such as the current networks and
+// storage pools, typically needed when a new node is joining.
 func clusterGet(d *Daemon, r *http.Request) Response {
 	// If the client is not trusted, check that it's presenting the trust
 	// password.
@@ -62,7 +64,18 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, cluster)
 }
 
-func clusterPost(d *Daemon, r *http.Request) Response {
+var clusterNodesCmd = Command{name: "cluster/nodes", untrustedPost: true, post: clusterNodesPost}
+
+// Depending on the parameters passed and on local state this endpoint will
+// either:
+//
+// - bootstrap a new cluster (if this node is not clustered yet)
+// - request to join an existing cluster
+// - accept the request of a node to join the cluster
+//
+// The client is required to be trusted when bootstrapping a cluster or request
+// to join an existing cluster.
+func clusterNodesPost(d *Daemon, r *http.Request) Response {
 	req := api.ClusterPost{}
 
 	// Parse the request
@@ -85,20 +98,20 @@ func clusterPost(d *Daemon, r *http.Request) Response {
 		if !trusted {
 			return Forbidden
 		}
-		return clusterPostBootstrap(d, req)
+		return clusterNodesPostBootstrap(d, req)
 	} else if req.TargetAddress == "" {
-		return clusterPostAccept(d, req)
+		return clusterNodesPostAccept(d, req)
 	} else {
 		// Joining an existing cluster requires the client to be
 		// trusted.
 		if !trusted {
 			return Forbidden
 		}
-		return clusterPostJoin(d, req)
+		return clusterNodesPostJoin(d, req)
 	}
 }
 
-func clusterPostBootstrap(d *Daemon, req api.ClusterPost) Response {
+func clusterNodesPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 	run := func(op *operation) error {
 		return cluster.Bootstrap(d.State(), d.gateway, req.Name)
 	}
@@ -113,7 +126,7 @@ func clusterPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 	return OperationResponse(op)
 }
 
-func clusterPostAccept(d *Daemon, req api.ClusterPost) Response {
+func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
 	// Accepting a node requires the client to provide the correct
 	// trust password.
 	secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
@@ -138,7 +151,7 @@ func clusterPostAccept(d *Daemon, req api.ClusterPost) Response {
 	return SyncResponse(true, accepted)
 }
 
-func clusterPostJoin(d *Daemon, req api.ClusterPost) Response {
+func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
+	// Make sure basic pre-conditions are met.
 	if len(req.TargetCert) == 0 {
 		return BadRequest(fmt.Errorf("No target cluster node certificate provided"))

From 6083144c0e8de963cdbdd631119db2c0762fdf0d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 1 Nov 2017 11:53:46 +0000
Subject: [PATCH 073/116] Add db APIs to remove a node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/node.go                    | 74 +++++++++++++++++++++++++++++++++++++--
 lxd/db/node_test.go               | 57 +++++++++++++++++++++++++++++-
 lxd/db/transaction_export_test.go | 11 ++++++
 3 files changed, 139 insertions(+), 3 deletions(-)
 create mode 100644 lxd/db/transaction_export_test.go

diff --git a/lxd/db/node.go b/lxd/db/node.go
index e029d1b31..743a6bd9e 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -27,8 +27,8 @@ func (n NodeInfo) IsDown() bool {
 	return n.Heartbeat.Before(time.Now().Add(-20 * time.Second))
 }
 
-// Node returns the node with the given network address.
-func (c *ClusterTx) Node(address string) (NodeInfo, error) {
+// NodeByAddress returns the node with the given network address.
+func (c *ClusterTx) NodeByAddress(address string) (NodeInfo, error) {
 	null := NodeInfo{}
 	nodes, err := c.nodes("address=?", address)
 	if err != nil {
@@ -44,6 +44,23 @@ func (c *ClusterTx) Node(address string) (NodeInfo, error) {
 	}
 }
 
+// NodeByName returns the node with the given name.
+func (c *ClusterTx) NodeByName(name string) (NodeInfo, error) {
+	null := NodeInfo{}
+	nodes, err := c.nodes("name=?", name)
+	if err != nil {
+		return null, err
+	}
+	switch len(nodes) {
+	case 0:
+		return null, NoSuchObjectError
+	case 1:
+		return nodes[0], nil
+	default:
+		return null, fmt.Errorf("more than one node matches")
+	}
+}
+
 // Nodes returns all LXD nodes part of the cluster.
 //
 // If this LXD instance is not clustered, a list with a single node whose
@@ -104,6 +121,22 @@ func (c *ClusterTx) NodeUpdate(id int64, name string, address string) error {
 	return nil
 }
 
+// NodeRemove removes the node with the given id.
+func (c *ClusterTx) NodeRemove(id int64) error {
+	result, err := c.tx.Exec("DELETE FROM nodes WHERE id=?", id)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return fmt.Errorf("query deleted %d rows instead of 1", n)
+	}
+	return nil
+}
+
 // NodeHeartbeat updates the heartbeat column of the node with the given address.
 func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
 	stmt := "UPDATE nodes SET heartbeat=? WHERE address=?"
@@ -120,3 +153,40 @@ func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
 	}
 	return nil
 }
+
+// NodeIsEmpty returns true if the node with the given ID has no containers or
+// images associated with it.
+func (c *ClusterTx) NodeIsEmpty(id int64) (bool, error) {
+	n, err := query.Count(c.tx, "containers", "node_id=?", id)
+	if err != nil {
+		return false, errors.Wrapf(err, "failed to get containers count for node %d", id)
+	}
+	if n > 0 {
+		return false, nil
+	}
+
+	n, err = query.Count(c.tx, "images", "node_id=?", id)
+	if err != nil {
+		return false, errors.Wrapf(err, "failed to get images count for node %d", id)
+	}
+	if n > 0 {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// NodeClear removes any container or image associated with this node.
+func (c *ClusterTx) NodeClear(id int64) error {
+	_, err := c.tx.Exec("DELETE FROM containers WHERE node_id=?", id)
+	if err != nil {
+		return err
+	}
+
+	_, err = c.tx.Exec("DELETE FROM images WHERE node_id=?", id)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index f59a08d1f..22ee430d9 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -24,12 +24,38 @@ func TestNodeAdd(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, nodes, 2)
 
-	node, err := tx.Node("1.2.3.4:666")
+	node, err := tx.NodeByAddress("1.2.3.4:666")
+	require.NoError(t, err)
 	assert.Equal(t, "buzz", node.Name)
 	assert.Equal(t, "1.2.3.4:666", node.Address)
 	assert.Equal(t, cluster.SchemaVersion, node.Schema)
 	assert.Equal(t, len(version.APIExtensions), node.APIExtensions)
 	assert.False(t, node.IsDown())
+
+	node, err = tx.NodeByName("buzz")
+	require.NoError(t, err)
+	assert.Equal(t, "buzz", node.Name)
+}
+
+// Remove a node from the cluster database.
+func TestNodeRemove(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	id, err := tx.NodeAdd("rusp", "5.6.7.8:666")
+	require.NoError(t, err)
+
+	err = tx.NodeRemove(id)
+	require.NoError(t, err)
+
+	_, err = tx.NodeByName("buzz")
+	assert.NoError(t, err)
+
+	_, err = tx.NodeByName("rusp")
+	assert.Equal(t, db.NoSuchObjectError, err)
 }
 
 // Update the heartbeat of a node.
@@ -50,3 +76,32 @@ func TestNodeHeartbeat(t *testing.T) {
 	node := nodes[1]
 	assert.True(t, node.IsDown())
 }
+
+// A node is considered empty only if it has no containers and no images.
+func TestNodeIsEmpty(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	id, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	empty, err := tx.NodeIsEmpty(id)
+	require.NoError(t, err)
+	assert.True(t, empty)
+
+	_, err = tx.Tx().Exec(`
+INSERT INTO containers (id, node_id, name, architecture, type) VALUES (1, ?, 'foo', 1, 1)
+`, id)
+	require.NoError(t, err)
+
+	empty, err = tx.NodeIsEmpty(id)
+	require.NoError(t, err)
+	assert.False(t, empty)
+
+	err = tx.NodeClear(id)
+	require.NoError(t, err)
+
+	empty, err = tx.NodeIsEmpty(id)
+	require.NoError(t, err)
+	assert.True(t, empty)
+}
diff --git a/lxd/db/transaction_export_test.go b/lxd/db/transaction_export_test.go
new file mode 100644
index 000000000..31884382f
--- /dev/null
+++ b/lxd/db/transaction_export_test.go
@@ -0,0 +1,11 @@
+package db
+
+import "database/sql"
+
+// Tx returns the low level database handle to the cluster transaction.
+//
+// FIXME: this is needed by tests that need to interact with entities that have
+// no high-level ClusterTx APIs yet (containers, images, etc.).
+func (c *ClusterTx) Tx() *sql.Tx {
+	return c.tx
+}

From f10b0edbedb59505e8d680739d9db68be8e14c1f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 1 Nov 2017 11:54:05 +0000
Subject: [PATCH 074/116] Add cluster.Leave function implementing logic to
 leave a cluster.

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 106 ++++++++++++++++++++++++++++++++++++++++-
 lxd/cluster/membership_test.go |  20 ++++++++
 2 files changed, 125 insertions(+), 1 deletion(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 1d0f6e4b2..6b5db4ffa 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -7,6 +7,8 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/CanonicalLtd/raft-http"
+	"github.com/CanonicalLtd/raft-membership"
 	"github.com/hashicorp/raft"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/db/cluster"
@@ -296,7 +298,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	// the new gRPC network connection. Also, update the storage_pools and
 	// networks tables with our local configuration.
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
-		node, err := tx.Node(address)
+		node, err := tx.NodeByAddress(address)
 		if err != nil {
 			return errors.Wrap(err, "failed to get ID of joining node")
 		}
@@ -345,6 +347,86 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 	return nil
 }
 
+// Leave a cluster.
+//
+// If the force flag is true, the node will be removed even if it still has
+// containers and images.
+//
+// Upon success, return the address of the leaving node.
+func Leave(state *state.State, gateway *Gateway, name string, force bool) (string, error) {
+	// Delete the node from the cluster and track its address.
+	var address string
+	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Get the node (if it doesn't exists an error is returned).
+		node, err := tx.NodeByName(name)
+		if err != nil {
+			return err
+		}
+
+		// Check that the node is eligible for leaving.
+		if !force {
+			err = membershipCheckClusterStateForLeave(tx, node.ID)
+		} else {
+			err = tx.NodeClear(node.ID)
+		}
+		if err != nil {
+			return err
+		}
+
+		// Actually remove the node from the cluster database.
+		err = tx.NodeRemove(node.ID)
+		if err != nil {
+			return err
+		}
+		address = node.Address
+		return nil
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// If the node is a database node, leave the raft cluster too.
+	id := ""
+	target := ""
+	err = state.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		if err != nil {
+			return err
+		}
+		for i, node := range nodes {
+			if node.Address == address {
+				id = strconv.Itoa(int(node.ID))
+				// Save the address of another database node,
+				// we'll use it to leave the raft cluster.
+				target = nodes[(i+1)%len(nodes)].Address
+				break
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return "", err
+	}
+
+	if target != "" {
+		logger.Info(
+			"Remove node from dqlite raft cluster",
+			log15.Ctx{"id": id, "address": address, "target": target})
+		dial, err := raftDial(gateway.cert)
+		if err != nil {
+			return "", err
+		}
+		err = rafthttp.ChangeMembership(
+			raftmembership.LeaveRequest, raftEndpoint, dial,
+			raft.ServerID(id), address, target, 5*time.Second)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	return address, nil
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
@@ -414,6 +496,28 @@ func membershipCheckClusterStateForAccept(tx *db.ClusterTx, name string, address
 	return nil
 }
 
+// Check that cluster-related preconditions are met for leaving a cluster.
+func membershipCheckClusterStateForLeave(tx *db.ClusterTx, nodeID int64) error {
+	// Check that it has no containers or images.
+	empty, err := tx.NodeIsEmpty(nodeID)
+	if err != nil {
+		return err
+	}
+	if !empty {
+		return fmt.Errorf("node has containers or images")
+	}
+
+	// Check that it's not the last node.
+	nodes, err := tx.Nodes()
+	if err != nil {
+		return err
+	}
+	if len(nodes) == 1 {
+		return fmt.Errorf("node is the only node in the cluster")
+	}
+	return nil
+}
+
 // Check that there is no left-over cluster certificate in the LXD var dir of
 // this node.
 func membershipCheckNoLeftoverClusterCert(dir string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index fd4489ac0..4cc58d012 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -310,6 +310,26 @@ func TestJoin(t *testing.T) {
 	assert.Equal(t, targetAddress, nodes[0].Address)
 	assert.Equal(t, int64(2), nodes[1].ID)
 	assert.Equal(t, address, nodes[1].Address)
+
+	// Leave the cluster.
+	leaving, err := cluster.Leave(state, gateway, "rusp", false /* force */)
+	require.NoError(t, err)
+	assert.Equal(t, address, leaving)
+
+	// The node has gone from the cluster db.
+	err = targetState.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err := tx.Nodes()
+		require.NoError(t, err)
+		assert.Len(t, nodes, 1)
+		return nil
+	})
+	require.NoError(t, err)
+
+	// The node has gone from the raft cluster.
+	raft := targetGateway.Raft()
+	future := raft.GetConfiguration()
+	require.NoError(t, future.Error())
+	assert.Len(t, future.Configuration().Servers, 1)
 }
 
 // Helper for setting fixtures for Bootstrap tests.

From 6ef3be18bc8c3c5993ad0748172bee44e39f6dc5 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 1 Nov 2017 14:06:13 +0000
Subject: [PATCH 075/116] Add REST API for leaving a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go    |  1 +
 client/lxd_cluster.go   | 15 +++++++++
 lxd/api_1.0.go          |  1 +
 lxd/api_cluster.go      | 88 ++++++++++++++++++++++++++++++++++++++++++++++++-
 lxd/api_cluster_test.go | 17 ++++++++++
 lxd/cluster/gateway.go  | 25 ++++++++++++++
 6 files changed, 146 insertions(+), 1 deletion(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 277f576f8..30c1fbd1c 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -165,6 +165,7 @@ type ContainerServer interface {
 	BootstrapCluster(name string) (op *Operation, err error)
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
+	LeaveCluster(name string, force bool) (op *Operation, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index e8baae3a5..7afc29fef 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -69,3 +69,18 @@ func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword, targetCert, nam
 
 	return op, nil
 }
+
+// LeaveCluster makes the given node leave the cluster (gracefully or not,
+// depending on the force flag).
+func (r *ProtocolLXD) LeaveCluster(name string, force bool) (*Operation, error) {
+	params := ""
+	if force {
+		params += "?force=1"
+	}
+	url := fmt.Sprintf("/cluster/nodes/%s%s", name, params)
+	op, _, err := r.queryOperation("DELETE", url, nil, "")
+	if err != nil {
+		return nil, err
+	}
+	return op, nil
+}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 3dcb9ddf3..9111682f7 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -61,6 +61,7 @@ var api10 = []Command{
 	serverResourceCmd,
 	clusterCmd,
 	clusterNodesCmd,
+	clusterNodeCmd,
 }
 
 func api10Get(d *Daemon, r *http.Request) Response {
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 0cf11e697..400f3a50f 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -4,18 +4,23 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
 
+	"github.com/gorilla/mux"
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/node"
 	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/version"
 	"github.com/pkg/errors"
 )
 
-var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet}
+var clusterCmd = Command{name: "cluster", untrustedGet: true, get: clusterGet, delete: clusterDelete}
 
 // Return information about the cluster, such as the current networks and
 // storage pools, typically needed when a new node is joining.
@@ -64,6 +69,34 @@ func clusterGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, cluster)
 }
 
+// Disable clustering on a node.
+func clusterDelete(d *Daemon, r *http.Request) Response {
+	// Update our TLS configuration using our original certificate.
+	for _, suffix := range []string{"crt", "key", "ca"} {
+		path := filepath.Join(d.os.VarDir, "cluster."+suffix)
+		if !shared.PathExists(path) {
+			continue
+		}
+		err := os.Remove(path)
+		if err != nil {
+			return InternalError(err)
+		}
+	}
+	cert, err := util.LoadCert(d.os.VarDir)
+	if err != nil {
+		return InternalError(errors.Wrap(err, "failed to parse node certificate"))
+	}
+
+	// Reset the cluster database and make it local to this node.
+	d.endpoints.NetworkUpdateCert(cert)
+	err = d.gateway.Reset(cert)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	return EmptySyncResponse
+}
+
 var clusterNodesCmd = Command{name: "cluster/nodes", untrustedPost: true, post: clusterNodesPost}
 
 // Depending on the parameters passed and on local state this endpoint will
@@ -215,3 +248,56 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 
 	return OperationResponse(op)
 }
+
+var clusterNodeCmd = Command{name: "cluster/nodes/{name}", delete: clusterNodeDelete}
+
+func clusterNodeDelete(d *Daemon, r *http.Request) Response {
+	force, err := strconv.Atoi(r.FormValue("force"))
+	if err != nil {
+		force = 0
+	}
+
+	name := mux.Vars(r)["name"]
+	address, err := cluster.Leave(d.State(), d.gateway, name, force == 1)
+	if err != nil {
+		return SmartError(err)
+	}
+
+	var run func(op *operation) error
+
+	if force == 1 {
+		// If the force flag is on, the returned operation is a no-op.
+		run = func(op *operation) error {
+			return nil
+		}
+
+	} else {
+		// Try to gracefully disable clustering on the target node.
+		cert := d.endpoints.NetworkCert()
+		args := &lxd.ConnectionArgs{
+			TLSServerCert: string(cert.PublicKey()),
+			TLSClientCert: string(cert.PublicKey()),
+			TLSClientKey:  string(cert.PrivateKey()),
+		}
+		run = func(op *operation) error {
+			// Request that the target node gracefully disable
+			// clustering on itself.
+			client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", address), args)
+			if err != nil {
+				return err
+			}
+			_, _, err = client.RawQuery("DELETE", "/1.0/cluster", nil, "")
+			return err
+		}
+	}
+
+	resources := map[string][]string{}
+	resources["cluster"] = []string{}
+
+	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return OperationResponse(op)
+}
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 55ba594ac..e4b58bb33 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -136,6 +136,23 @@ func TestCluster_Failover(t *testing.T) {
 	}
 }
 
+// A node can leave a cluster gracefully.
+func TestCluster_Leave(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping cluster leave test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	client := f.ClientUnix(daemons[1])
+	op, err := client.LeaveCluster("rusp-0", false)
+	require.NoError(t, err)
+	assert.NoError(t, op.Wait())
+}
+
 // Test helper for cluster-related APIs.
 type clusterFixture struct {
 	t       *testing.T
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index 342006939..ae5aa01b9 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -5,6 +5,8 @@ import (
 	"net"
 	"net/http"
 	"net/url"
+	"os"
+	"path/filepath"
 	"strconv"
 	"time"
 
@@ -238,6 +240,29 @@ func (g *Gateway) Shutdown() error {
 	return g.raft.Shutdown()
 }
 
+// Reset the gateway, shutting it down and starting again from scratch using
+// the given certificate.
+//
+// This is used when disabling clustering on a node.
+func (g *Gateway) Reset(cert *shared.CertInfo) error {
+	err := g.Shutdown()
+	if err != nil {
+		return err
+	}
+	err = os.RemoveAll(filepath.Join(g.db.Dir(), "raft"))
+	if err != nil {
+		return err
+	}
+	err = g.db.Transaction(func(tx *db.NodeTx) error {
+		return tx.RaftNodesReplace(nil)
+	})
+	if err != nil {
+		return err
+	}
+	g.cert = cert
+	return g.init()
+}
+
 // Initialize the gateway, creating a new raft factory and gRPC server (if this
 // node is a database node), and a gRPC dialer.
 func (g *Gateway) init() error {

From 371b173f1b6f621de4125a299c01947ed64082b1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 6 Nov 2017 11:54:08 +0000
Subject: [PATCH 076/116] Add lxc cluster remove command

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/cluster.go            | 74 +++++++++++++++++++++++++++++++++++++++++++++++
 lxc/main.go               |  1 +
 po/de.po                  | 34 +++++++++++++++++-----
 po/el.po                  | 34 +++++++++++++++++-----
 po/fr.po                  | 34 +++++++++++++++++-----
 po/id.po                  | 34 +++++++++++++++++-----
 po/it.po                  | 34 +++++++++++++++++-----
 po/ja.po                  | 34 +++++++++++++++++-----
 po/lxd.pot                | 33 ++++++++++++++++-----
 po/nb_NO.po               | 34 +++++++++++++++++-----
 po/nl.po                  | 34 +++++++++++++++++-----
 po/pt_BR.po               | 34 +++++++++++++++++-----
 po/ru.po                  | 34 +++++++++++++++++-----
 po/sr.po                  | 34 +++++++++++++++++-----
 po/sv.po                  | 34 +++++++++++++++++-----
 po/tr.po                  | 34 +++++++++++++++++-----
 po/zh.po                  | 34 +++++++++++++++++-----
 po/zh_Hans.po             | 34 +++++++++++++++++-----
 test/suites/clustering.sh | 19 ++++++++++--
 19 files changed, 522 insertions(+), 115 deletions(-)
 create mode 100644 lxc/cluster.go

diff --git a/lxc/cluster.go b/lxc/cluster.go
new file mode 100644
index 000000000..7abfaa284
--- /dev/null
+++ b/lxc/cluster.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/lxc/config"
+	"github.com/lxc/lxd/shared/gnuflag"
+	"github.com/lxc/lxd/shared/i18n"
+)
+
+type clusterCmd struct {
+	force bool
+}
+
+func (c *clusterCmd) usage() string {
+	return i18n.G(
+		`Usage: lxc cluster <subcommand> [options]
+
+Manage cluster nodes.
+
+*Cluster nodes*
+lxc cluster remove <node> [--force]
+    Remove a node from the cluster.`)
+}
+
+func (c *clusterCmd) flags() {
+	gnuflag.BoolVar(&c.force, "force", false, i18n.G("Force removing a node, even if degraded"))
+}
+
+func (c *clusterCmd) showByDefault() bool {
+	return true
+}
+
+func (c *clusterCmd) run(conf *config.Config, args []string) error {
+	if len(args) < 1 {
+		return errUsage
+	}
+
+	if args[0] == "remove" {
+		return c.doClusterNodeRemove(conf, args)
+	}
+
+	return nil
+}
+
+func (c *clusterCmd) doClusterNodeRemove(conf *config.Config, args []string) error {
+	if len(args) < 2 {
+		return errArgs
+	}
+
+	// [[lxc cluster]] remove production:bionic-1
+	remote, name, err := conf.ParseRemote(args[1])
+	if err != nil {
+		return err
+	}
+
+	client, err := conf.GetContainerServer(remote)
+	if err != nil {
+		return err
+	}
+
+	op, err := client.LeaveCluster(name, c.force)
+	if err != nil {
+		return err
+	}
+
+	err = op.Wait()
+	if err != nil {
+		return nil
+	}
+
+	fmt.Printf(i18n.G("Node %s removed")+"\n", name)
+	return nil
+}
diff --git a/lxc/main.go b/lxc/main.go
index 0e6575363..6133a44c1 100644
--- a/lxc/main.go
+++ b/lxc/main.go
@@ -213,6 +213,7 @@ type command interface {
 }
 
 var commands = map[string]command{
+	"cluster":   &clusterCmd{},
 	"config":    &configCmd{},
 	"console":   &consoleCmd{},
 	"copy":      &copyCmd{},
diff --git a/po/de.po b/po/de.po
index 146750977..8f14e96da 100644
--- a/po/de.po
+++ b/po/de.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: 2017-02-14 17:11+0000\n"
 "Last-Translator: Tim Rose <tim at netlope.de>\n"
 "Language-Team: German <https://hosted.weblate.org/projects/linux-containers/"
@@ -654,6 +654,10 @@ msgstr "Fingerabdruck: %s\n"
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 #, fuzzy
 msgid "Force the container to shutdown"
@@ -910,6 +914,11 @@ msgstr "Kein Zertifikat für diese Verbindung"
 msgid "No fingerprint specified."
 msgstr "Kein Fingerabdruck angegeben."
 
+#: lxc/cluster.go:72
+#, fuzzy, c-format
+msgid "Node %s removed"
+msgstr "Gerät %s wurde von %s entfernt\n"
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -977,7 +986,7 @@ msgstr "Alternatives config Verzeichnis."
 msgid "Path to an alternate server directory"
 msgstr "Alternatives config Verzeichnis."
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 #, fuzzy
 msgid "Pause containers."
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -1116,7 +1125,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 #, fuzzy
 msgid "Restart containers."
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -1231,7 +1240,7 @@ msgstr "Anhalten des Containers fehlgeschlagen!"
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 #, fuzzy
 msgid "Start containers."
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -1246,7 +1255,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 #, fuzzy
 msgid "Stop containers."
 msgstr "Anhalten des Containers fehlgeschlagen!"
@@ -1459,6 +1468,17 @@ msgstr ""
 "Benutzung: lxc [Unterbefehl] [Optionen]\n"
 "Verfügbare Befehle:\n"
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 #, fuzzy
 msgid ""
@@ -2538,7 +2558,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr "OK (y/n)? "
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2580,7 +2600,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr "falsche Anzahl an Parametern für Unterbefehl"
 
diff --git a/po/el.po b/po/el.po
index e9eb9b386..5c0d23963 100644
--- a/po/el.po
+++ b/po/el.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: 2017-02-14 08:00+0000\n"
 "Last-Translator: Simos Xenitellis <simos.65 at gmail.com>\n"
 "Language-Team: Greek <https://hosted.weblate.org/projects/linux-containers/"
@@ -534,6 +534,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -780,6 +784,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -845,7 +854,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -980,7 +989,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1091,7 +1100,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1105,7 +1114,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1303,6 +1312,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2219,7 +2239,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2261,7 +2281,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/fr.po b/po/fr.po
index fe85c60b8..000b40209 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: 2017-10-26 15:46+0000\n"
 "Last-Translator: Alban Vidal <alban.vidal at zordhak.fr>\n"
 "Language-Team: French <https://hosted.weblate.org/projects/linux-containers/"
@@ -638,6 +638,10 @@ msgstr "Empreinte : %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "Forcer l'allocation de pseudo-terminal "
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr "Forcer le conteneur à s'arrêter"
@@ -894,6 +898,11 @@ msgstr "Aucun périphérique existant pour ce réseau"
 msgid "No fingerprint specified."
 msgstr "Aucune empreinte n'a été indiquée."
 
+#: lxc/cluster.go:72
+#, fuzzy, c-format
+msgid "Node %s removed"
+msgstr "Profil %s supprimé de %s"
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr "Seul les volumes \"personnalisés\" peuvent être attaché aux conteneurs"
@@ -959,7 +968,7 @@ msgstr "Chemin vers un dossier de configuration client alternatif"
 msgid "Path to an alternate server directory"
 msgstr "Chemin vers un dossier de configuration serveur alternatif"
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 #, fuzzy
 msgid "Pause containers."
 msgstr "Création du conteneur"
@@ -1096,7 +1105,7 @@ msgstr "Requérir une confirmation de l'utilisateur"
 msgid "Resources:"
 msgstr "Ressources :"
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 #, fuzzy
 msgid "Restart containers."
 msgstr "Création du conteneur"
@@ -1212,7 +1221,7 @@ msgstr "L'arrêt du conteneur a échoué !"
 msgid "Source:"
 msgstr "Source :"
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 #, fuzzy
 msgid "Start containers."
 msgstr "Création du conteneur"
@@ -1227,7 +1236,7 @@ msgstr "Démarrage de %s"
 msgid "Status: %s"
 msgstr "État : %s"
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 #, fuzzy
 msgid "Stop containers."
 msgstr "L'arrêt du conteneur a échoué !"
@@ -1441,6 +1450,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "Utilisation : lxc <commande> [options]"
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 #, fuzzy
 msgid ""
@@ -2821,7 +2841,7 @@ msgstr "non"
 msgid "ok (y/n)?"
 msgstr "ok (y/n) ?"
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr "l'analyse des alias a échoué %s\n"
@@ -2863,7 +2883,7 @@ msgstr "sans suivi d'état"
 msgid "taken at %s"
 msgstr "pris à %s"
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr "nombre d'arguments incorrect pour la sous-comande"
 
diff --git a/po/id.po b/po/id.po
index 3d4557f3a..959c758f5 100644
--- a/po/id.po
+++ b/po/id.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -529,6 +529,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -773,6 +777,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -838,7 +847,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -973,7 +982,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1098,7 +1107,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1296,6 +1305,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2212,7 +2232,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2254,7 +2274,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/it.po b/po/it.po
index 6fdfd6bf0..2632a015e 100644
--- a/po/it.po
+++ b/po/it.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: 2017-08-18 14:22+0000\n"
 "Last-Translator: Alberto Donato <alberto.donato at gmail.com>\n"
 "Language-Team: Italian <https://hosted.weblate.org/projects/linux-containers/"
@@ -555,6 +555,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -799,6 +803,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -864,7 +873,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -999,7 +1008,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1110,7 +1119,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1124,7 +1133,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1322,6 +1331,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2238,7 +2258,7 @@ msgstr "no"
 msgid "ok (y/n)?"
 msgstr "ok (y/n)?"
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr "errore di processamento degli alias %s\n"
@@ -2280,7 +2300,7 @@ msgstr "senza stato"
 msgid "taken at %s"
 msgstr "salvato alle %s"
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr "numero errato di argomenti del sottocomando"
 
diff --git a/po/ja.po b/po/ja.po
index 32eda96cb..68a4db16d 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: 2017-09-28 20:29+0000\n"
 "Last-Translator: KATOH Yasufumi <karma at jazz.email.ne.jp>\n"
 "Language-Team: Japanese <https://hosted.weblate.org/projects/linux-"
@@ -537,6 +537,10 @@ msgstr "証明書のフィンガープリント: %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "強制的に擬似端末を割り当てます"
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr "コンテナを強制シャットダウンします"
@@ -784,6 +788,11 @@ msgstr "このストレージボリュームに対するデバイスがありま
 msgid "No fingerprint specified."
 msgstr "フィンガープリントが指定されていません。"
 
+#: lxc/cluster.go:72
+#, fuzzy, c-format
+msgid "Node %s removed"
+msgstr "プロファイル %s が %s から削除されました"
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr "\"カスタム\" のボリュームのみがコンテナにアタッチできます。"
@@ -849,7 +858,7 @@ msgstr "別のクライアント用設定ディレクトリ"
 msgid "Path to an alternate server directory"
 msgstr "別のサーバ用設定ディレクトリ"
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr "コンテナを一時停止します。"
 
@@ -984,7 +993,7 @@ msgstr "ユーザの確認を要求する"
 msgid "Resources:"
 msgstr "リソース:"
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr "コンテナを再起動します。"
 
@@ -1097,7 +1106,7 @@ msgstr "一部のコンテナで %s が失敗しました"
 msgid "Source:"
 msgstr "取得元:"
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr "コンテナを起動します。"
 
@@ -1111,7 +1120,7 @@ msgstr "%s を起動中"
 msgid "Status: %s"
 msgstr "状態: %s"
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr "コンテナを停止します。"
 
@@ -1325,6 +1334,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "使い方: lxc <コマンド> [オプション]"
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2938,7 +2958,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr "ok (y/n)?"
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr "エイリアスの処理が失敗しました %s\n"
@@ -2980,7 +3000,7 @@ msgstr "ステートレス"
 msgid "taken at %s"
 msgstr "%s に取得しました"
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr "サブコマンドの引数の数が正しくありません"
 
diff --git a/po/lxd.pot b/po/lxd.pot
index 6563eaf87..06be2f45c 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -7,7 +7,7 @@
 msgid   ""
 msgstr  "Project-Id-Version: lxd\n"
         "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-        "POT-Creation-Date: 2017-11-30 21:51-0800\n"
+        "POT-Creation-Date: 2017-12-04 08:56+0000\n"
         "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
         "Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
         "Language-Team: LANGUAGE <LL at li.org>\n"
@@ -520,6 +520,10 @@ msgstr  ""
 msgid   "Force pseudo-terminal allocation"
 msgstr  ""
 
+#: lxc/cluster.go:27
+msgid   "Force removing a node, even if degraded"
+msgstr  ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid   "Force the container to shutdown"
 msgstr  ""
@@ -763,6 +767,11 @@ msgstr  ""
 msgid   "No fingerprint specified."
 msgstr  ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid   "Node %s removed"
+msgstr  ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid   "Only \"custom\" volumes can be attached to containers."
 msgstr  ""
@@ -828,7 +837,7 @@ msgstr  ""
 msgid   "Path to an alternate server directory"
 msgstr  ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid   "Pause containers."
 msgstr  ""
 
@@ -962,7 +971,7 @@ msgstr  ""
 msgid   "Resources:"
 msgstr  ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid   "Restart containers."
 msgstr  ""
 
@@ -1073,7 +1082,7 @@ msgstr  ""
 msgid   "Source:"
 msgstr  ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid   "Start containers."
 msgstr  ""
 
@@ -1087,7 +1096,7 @@ msgstr  ""
 msgid   "Status: %s"
 msgstr  ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid   "Stop containers."
 msgstr  ""
 
@@ -1280,6 +1289,16 @@ msgstr  ""
 msgid   "Usage: lxc <command> [options]"
 msgstr  ""
 
+#: lxc/cluster.go:16
+msgid   "Usage: lxc cluster <subcommand> [options]\n"
+        "\n"
+        "Manage cluster nodes.\n"
+        "\n"
+        "*Cluster nodes*\n"
+        "lxc cluster remove <node> [--force]\n"
+        "    Remove a node from the cluster."
+msgstr  ""
+
 #: lxc/config.go:85
 msgid   "Usage: lxc config <subcommand> [options]\n"
         "\n"
@@ -2105,7 +2124,7 @@ msgstr  ""
 msgid   "ok (y/n)?"
 msgstr  ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid   "processing aliases failed %s\n"
 msgstr  ""
@@ -2147,7 +2166,7 @@ msgstr  ""
 msgid   "taken at %s"
 msgstr  ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid   "wrong number of subcommand arguments"
 msgstr  ""
 
diff --git a/po/nb_NO.po b/po/nb_NO.po
index 8e8e108ff..d877d8268 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -529,6 +529,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -773,6 +777,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -838,7 +847,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -973,7 +982,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1098,7 +1107,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1296,6 +1305,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2212,7 +2232,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2254,7 +2274,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/nl.po b/po/nl.po
index 24a699635..e5030ceb9 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -529,6 +529,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -773,6 +777,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -838,7 +847,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -973,7 +982,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1098,7 +1107,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1296,6 +1305,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2212,7 +2232,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2254,7 +2274,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/pt_BR.po b/po/pt_BR.po
index 6be213b4d..dc636a211 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -529,6 +529,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -773,6 +777,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -838,7 +847,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -973,7 +982,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1098,7 +1107,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1296,6 +1305,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2212,7 +2232,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2254,7 +2274,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/ru.po b/po/ru.po
index f7f2b86fa..b98783d35 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: 2017-09-05 16:48+0000\n"
 "Last-Translator: Ilya Yakimavets <ilya.yakimavets at backend.expert>\n"
 "Language-Team: Russian <https://hosted.weblate.org/projects/linux-containers/"
@@ -618,6 +618,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -864,6 +868,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -929,7 +938,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -1064,7 +1073,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1175,7 +1184,7 @@ msgstr "Невозможно добавить имя контейнера в с
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1189,7 +1198,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1390,6 +1399,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2315,7 +2335,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2357,7 +2377,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/sr.po b/po/sr.po
index 89e5cf360..4c6db9898 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -529,6 +529,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -773,6 +777,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -838,7 +847,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -973,7 +982,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1098,7 +1107,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1296,6 +1305,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2212,7 +2232,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2254,7 +2274,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/sv.po b/po/sv.po
index 0bf89109c..26cf26b2f 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -529,6 +529,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -773,6 +777,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -838,7 +847,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -973,7 +982,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1098,7 +1107,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1296,6 +1305,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2212,7 +2232,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2254,7 +2274,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/tr.po b/po/tr.po
index e86e9dcc3..be6119b9e 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -529,6 +529,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -773,6 +777,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -838,7 +847,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -973,7 +982,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1098,7 +1107,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1296,6 +1305,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2212,7 +2232,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2254,7 +2274,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/zh.po b/po/zh.po
index 2c29f5bf2..8f0731f0f 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -529,6 +529,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -773,6 +777,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -838,7 +847,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -973,7 +982,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1098,7 +1107,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1296,6 +1305,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2212,7 +2232,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2254,7 +2274,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index fed471a09..0546b522d 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-11-30 21:51-0800\n"
+"POT-Creation-Date: 2017-12-04 08:56+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -529,6 +529,10 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
+#: lxc/cluster.go:27
+msgid "Force removing a node, even if degraded"
+msgstr ""
+
 #: lxc/action.go:46 lxc/action.go:47
 msgid "Force the container to shutdown"
 msgstr ""
@@ -773,6 +777,11 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
+#: lxc/cluster.go:72
+#, c-format
+msgid "Node %s removed"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -838,7 +847,7 @@ msgstr ""
 msgid "Path to an alternate server directory"
 msgstr ""
 
-#: lxc/main.go:238
+#: lxc/main.go:239
 msgid "Pause containers."
 msgstr ""
 
@@ -973,7 +982,7 @@ msgstr ""
 msgid "Resources:"
 msgstr ""
 
-#: lxc/main.go:246
+#: lxc/main.go:247
 msgid "Restart containers."
 msgstr ""
 
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Source:"
 msgstr ""
 
-#: lxc/main.go:256
+#: lxc/main.go:257
 msgid "Start containers."
 msgstr ""
 
@@ -1098,7 +1107,7 @@ msgstr ""
 msgid "Status: %s"
 msgstr ""
 
-#: lxc/main.go:262
+#: lxc/main.go:263
 msgid "Stop containers."
 msgstr ""
 
@@ -1296,6 +1305,17 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
+#: lxc/cluster.go:16
+msgid ""
+"Usage: lxc cluster <subcommand> [options]\n"
+"\n"
+"Manage cluster nodes.\n"
+"\n"
+"*Cluster nodes*\n"
+"lxc cluster remove <node> [--force]\n"
+"    Remove a node from the cluster."
+msgstr ""
+
 #: lxc/config.go:85
 msgid ""
 "Usage: lxc config <subcommand> [options]\n"
@@ -2212,7 +2232,7 @@ msgstr ""
 msgid "ok (y/n)?"
 msgstr ""
 
-#: lxc/main.go:366 lxc/main.go:370
+#: lxc/main.go:367 lxc/main.go:371
 #, c-format
 msgid "processing aliases failed %s\n"
 msgstr ""
@@ -2254,7 +2274,7 @@ msgstr ""
 msgid "taken at %s"
 msgstr ""
 
-#: lxc/main.go:297
+#: lxc/main.go:298
 msgid "wrong number of subcommand arguments"
 msgstr ""
 
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 27812d889..119529ed9 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -49,11 +49,24 @@ test_clustering() {
   ns5="${prefix}5"
   spawn_lxd_and_join_cluster "${ns5}" "${bridge}" "${cert}" 5 4 "${LXD_FIVE_DIR}"
 
-  # The preseeded network can be deleted from any node, other nodes
-  # are notified.
+  # Shutdown a non-database node, and wait a few seconds so it will be
+  # detected as down.
+  LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown
+  sleep 5
+
+  # Trying to delete the preseeded network now fails, because a node is degraded.
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
+
+  # Force the removal of the degraded node.
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster remove node5 --force
+
+  # Now the preseeded network can be deleted, and all nodes are
+  # notified.
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
-  LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown
+  # Remove a node gracefully.
+  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster remove node4
+
   LXD_DIR="${LXD_FOUR_DIR}" lxd shutdown
   LXD_DIR="${LXD_THREE_DIR}" lxd shutdown
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown

From 470462155a19947440d90b9775d0785448e0c057 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 6 Nov 2017 12:26:14 +0000
Subject: [PATCH 077/116] Add cluster.List to get a list of current cluster
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 39 +++++++++++++++++++++++++++++++++++++++
 lxd/cluster/membership_test.go | 21 ++++++++++++++-------
 2 files changed, 53 insertions(+), 7 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 6b5db4ffa..77e17b74f 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -427,6 +427,45 @@ func Leave(state *state.State, gateway *Gateway, name string, force bool) (strin
 	return address, nil
 }
 
+// List the nodes of the cluster.
+//
+// Upon success return a list of the current nodes and a map that for each ID
+// tells if the node is part of the database cluster or not.
+func List(state *state.State) ([]db.NodeInfo, map[int64]bool, error) {
+	addresses := []string{} // Addresses of database nodes
+	err := state.Node.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch current raft nodes")
+		}
+		for _, node := range nodes {
+			addresses = append(addresses, node.Address)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var nodes []db.NodeInfo
+	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodes, err = tx.Nodes()
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+	flags := make(map[int64]bool) // Whether a node is a database node
+	for _, node := range nodes {
+		flags[node.ID] = shared.StringInSlice(node.Address, addresses)
+	}
+
+	return nodes, flags, nil
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index 4cc58d012..bfa5cce8f 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -294,22 +294,29 @@ func TestJoin(t *testing.T) {
 	f.NetworkAddress(address)
 
 	// Accept the joining node.
-	nodes, err := cluster.Accept(
+	raftNodes, err := cluster.Accept(
 		targetState, "rusp", address, cluster.SchemaVersion, len(version.APIExtensions))
 	require.NoError(t, err)
 
 	// Actually join the cluster.
-	err = cluster.Join(state, gateway, targetCert, "rusp", nodes)
+	err = cluster.Join(state, gateway, targetCert, "rusp", raftNodes)
 	require.NoError(t, err)
 
 	// The leader now returns an updated list of raft nodes.
-	nodes, err = targetGateway.RaftNodes()
+	raftNodes, err = targetGateway.RaftNodes()
+	require.NoError(t, err)
+	assert.Len(t, raftNodes, 2)
+	assert.Equal(t, int64(1), raftNodes[0].ID)
+	assert.Equal(t, targetAddress, raftNodes[0].Address)
+	assert.Equal(t, int64(2), raftNodes[1].ID)
+	assert.Equal(t, address, raftNodes[1].Address)
+
+	// The List function returns all nodes in the cluster.
+	nodes, flags, err := cluster.List(state)
 	require.NoError(t, err)
 	assert.Len(t, nodes, 2)
-	assert.Equal(t, int64(1), nodes[0].ID)
-	assert.Equal(t, targetAddress, nodes[0].Address)
-	assert.Equal(t, int64(2), nodes[1].ID)
-	assert.Equal(t, address, nodes[1].Address)
+	assert.True(t, flags[1])
+	assert.True(t, flags[2])
 
 	// Leave the cluster.
 	leaving, err := cluster.Leave(state, gateway, "rusp", false /* force */)

From c3ce38f3510cf553116a1f0b5804fd55d7953fa2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 6 Nov 2017 12:42:29 +0000
Subject: [PATCH 078/116] Add GetNodes API client method

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |  1 +
 client/lxd_cluster.go     | 13 ++++++++++++
 lxc/cluster.go            | 53 +++++++++++++++++++++++++++++++++++++++++++++++
 lxd/api_cluster.go        | 27 +++++++++++++++++++++++-
 lxd/api_cluster_test.go   |  9 ++++++++
 po/de.po                  | 20 +++++++++++-------
 po/el.po                  | 20 +++++++++++-------
 po/fr.po                  | 20 +++++++++++-------
 po/id.po                  | 20 +++++++++++-------
 po/it.po                  | 20 +++++++++++-------
 po/ja.po                  | 20 +++++++++++-------
 po/lxd.pot                | 18 +++++++++-------
 po/nb_NO.po               | 20 +++++++++++-------
 po/nl.po                  | 20 +++++++++++-------
 po/pt_BR.po               | 20 +++++++++++-------
 po/ru.po                  | 20 +++++++++++-------
 po/sr.po                  | 20 +++++++++++-------
 po/sv.po                  | 20 +++++++++++-------
 po/tr.po                  | 20 +++++++++++-------
 po/zh.po                  | 20 +++++++++++-------
 po/zh_Hans.po             | 20 +++++++++++-------
 shared/api/cluster.go     |  8 +++++++
 test/suites/clustering.sh |  6 +++++-
 23 files changed, 306 insertions(+), 129 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 30c1fbd1c..6b40d887e 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -166,6 +166,7 @@ type ContainerServer interface {
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
 	LeaveCluster(name string, force bool) (op *Operation, err error)
+	GetNodes() (nodes []api.Node, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 7afc29fef..5d702459c 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -84,3 +84,16 @@ func (r *ProtocolLXD) LeaveCluster(name string, force bool) (*Operation, error)
 	}
 	return op, nil
 }
+
+// GetNodes returns the current nodes in the cluster.
+func (r *ProtocolLXD) GetNodes() ([]api.Node, error) {
+	nodes := []api.Node{}
+	path := "/cluster/nodes"
+	_, err := r.queryStruct("GET", path, nil, "", &nodes)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return nodes, nil
+}
diff --git a/lxc/cluster.go b/lxc/cluster.go
index 7abfaa284..8cac3c4fa 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -2,10 +2,13 @@ package main
 
 import (
 	"fmt"
+	"os"
+	"sort"
 
 	"github.com/lxc/lxd/lxc/config"
 	"github.com/lxc/lxd/shared/gnuflag"
 	"github.com/lxc/lxd/shared/i18n"
+	"github.com/olekukonko/tablewriter"
 )
 
 type clusterCmd struct {
@@ -36,6 +39,10 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 		return errUsage
 	}
 
+	if args[0] == "list" {
+		return c.doClusterList(conf, args)
+	}
+
 	if args[0] == "remove" {
 		return c.doClusterNodeRemove(conf, args)
 	}
@@ -72,3 +79,49 @@ func (c *clusterCmd) doClusterNodeRemove(conf *config.Config, args []string) err
 	fmt.Printf(i18n.G("Node %s removed")+"\n", name)
 	return nil
 }
+
+func (c *clusterCmd) doClusterList(conf *config.Config, args []string) error {
+	remote := conf.DefaultRemote
+
+	if len(args) > 1 {
+		var err error
+		remote, _, err = conf.ParseRemote(args[1])
+		if err != nil {
+			return err
+		}
+	}
+
+	client, err := conf.GetContainerServer(remote)
+	if err != nil {
+		return err
+	}
+
+	nodes, err := client.GetNodes()
+	if err != nil {
+		return err
+	}
+
+	data := [][]string{}
+	for _, node := range nodes {
+		database := "NO"
+		if node.Database {
+			database = "YES"
+		}
+		data = append(data, []string{node.Name, node.URL, database, node.State})
+	}
+
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetAutoWrapText(false)
+	table.SetAlignment(tablewriter.ALIGN_LEFT)
+	table.SetRowLine(true)
+	table.SetHeader([]string{
+		i18n.G("NAME"),
+		i18n.G("URL"),
+		i18n.G("DATABASE"),
+		i18n.G("STATE")})
+	sort.Sort(byName(data))
+	table.AppendBulk(data)
+	table.Render()
+
+	return nil
+}
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 400f3a50f..e31ac010e 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -97,7 +97,11 @@ func clusterDelete(d *Daemon, r *http.Request) Response {
 	return EmptySyncResponse
 }
 
-var clusterNodesCmd = Command{name: "cluster/nodes", untrustedPost: true, post: clusterNodesPost}
+var clusterNodesCmd = Command{
+	name: "cluster/nodes",
+	post: clusterNodesPost, untrustedPost: true,
+	get: clusterNodesGet,
+}
 
 // Depending on the parameters passed and on local state this endpoint will
 // either:
@@ -249,6 +253,27 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 	return OperationResponse(op)
 }
 
+func clusterNodesGet(d *Daemon, r *http.Request) Response {
+	dbNodes, flags, err := cluster.List(d.State())
+	if err != nil {
+		return SmartError(err)
+	}
+
+	nodes := make([]api.Node, len(dbNodes))
+	for i, dbNode := range dbNodes {
+		nodes[i].Name = dbNode.Name
+		nodes[i].URL = fmt.Sprintf("https://%s", dbNode.Address)
+		nodes[i].Database = flags[dbNode.ID]
+		if dbNode.IsDown() {
+			nodes[i].State = "OFFLINE"
+		} else {
+			nodes[i].State = "ONLINE"
+		}
+	}
+
+	return SyncResponse(true, nodes)
+}
+
 var clusterNodeCmd = Command{name: "cluster/nodes/{name}", delete: clusterNodeDelete}
 
 func clusterNodeDelete(d *Daemon, r *http.Request) Response {
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index e4b58bb33..1c700ca94 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -80,6 +80,15 @@ func TestCluster_Join(t *testing.T) {
 	for _, daemon := range daemons {
 		assert.NotNil(t, daemon.externalAuth)
 	}
+
+	// The GetNodes client method returns both nodes.
+	nodes, err := client.GetNodes()
+	require.NoError(t, err)
+	assert.Len(t, nodes, 2)
+	assert.Equal(t, "buzz", nodes[0].Name)
+	assert.Equal(t, "rusp", nodes[1].Name)
+	assert.Equal(t, "ONLINE", nodes[0].State)
+	assert.Equal(t, "ONLINE", nodes[1].State)
 }
 
 // If the wrong trust password is given, the join request fails.
diff --git a/po/de.po b/po/de.po
index 8f14e96da..5eb520041 100644
--- a/po/de.po
+++ b/po/de.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: 2017-02-14 17:11+0000\n"
 "Last-Translator: Tim Rose <tim at netlope.de>\n"
 "Language-Team: German <https://hosted.weblate.org/projects/linux-containers/"
@@ -498,6 +498,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -654,7 +658,7 @@ msgstr "Fingerabdruck: %s\n"
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -853,8 +857,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "der Name des Ursprung Containers muss angegeben werden"
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -914,7 +918,7 @@ msgstr "Kein Zertifikat für diese Verbindung"
 msgid "No fingerprint specified."
 msgstr "Kein Fingerabdruck angegeben."
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Gerät %s wurde von %s entfernt\n"
@@ -1152,7 +1156,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1428,7 +1432,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1468,7 +1472,7 @@ msgstr ""
 "Benutzung: lxc [Unterbefehl] [Optionen]\n"
 "Verfügbare Befehle:\n"
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/el.po b/po/el.po
index 5c0d23963..099a69a97 100644
--- a/po/el.po
+++ b/po/el.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: 2017-02-14 08:00+0000\n"
 "Last-Translator: Simos Xenitellis <simos.65 at gmail.com>\n"
 "Language-Team: Greek <https://hosted.weblate.org/projects/linux-containers/"
@@ -384,6 +384,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -534,7 +538,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -726,8 +730,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -784,7 +788,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1014,7 +1018,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1278,7 +1282,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1312,7 +1316,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/fr.po b/po/fr.po
index 000b40209..52ebac8d4 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: 2017-10-26 15:46+0000\n"
 "Last-Translator: Alban Vidal <alban.vidal at zordhak.fr>\n"
 "Language-Team: French <https://hosted.weblate.org/projects/linux-containers/"
@@ -485,6 +485,10 @@ msgstr "Création de %s"
 msgid "Creating the container"
 msgstr "Création du conteneur"
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -638,7 +642,7 @@ msgstr "Empreinte : %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "Forcer l'allocation de pseudo-terminal "
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -839,8 +843,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "Vous devez fournir le nom d'un conteneur pour : "
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr "NOM"
 
@@ -898,7 +902,7 @@ msgstr "Aucun périphérique existant pour ce réseau"
 msgid "No fingerprint specified."
 msgstr "Aucune empreinte n'a été indiquée."
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Profil %s supprimé de %s"
@@ -1132,7 +1136,7 @@ msgstr "INSTANTANÉS"
 msgid "SOURCE"
 msgstr "SOURCE"
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr "ÉTAT"
 
@@ -1413,7 +1417,7 @@ msgstr "Type : persistant"
 msgid "UPLOAD DATE"
 msgstr "DATE DE PUBLICATION"
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr "URL"
 
@@ -1450,7 +1454,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "Utilisation : lxc <commande> [options]"
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/id.po b/po/id.po
index 959c758f5..23cf51d7f 100644
--- a/po/id.po
+++ b/po/id.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,6 +380,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -529,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -720,8 +724,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1007,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1271,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1305,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/it.po b/po/it.po
index 2632a015e..7097dc17c 100644
--- a/po/it.po
+++ b/po/it.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: 2017-08-18 14:22+0000\n"
 "Last-Translator: Alberto Donato <alberto.donato at gmail.com>\n"
 "Language-Team: Italian <https://hosted.weblate.org/projects/linux-containers/"
@@ -405,6 +405,10 @@ msgstr "Creazione di %s in corso"
 msgid "Creating the container"
 msgstr "Creazione del container in corso"
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -555,7 +559,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -746,8 +750,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -803,7 +807,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1033,7 +1037,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1297,7 +1301,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1331,7 +1335,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/ja.po b/po/ja.po
index 68a4db16d..57f2af611 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: 2017-09-28 20:29+0000\n"
 "Last-Translator: KATOH Yasufumi <karma at jazz.email.ne.jp>\n"
 "Language-Team: Japanese <https://hosted.weblate.org/projects/linux-"
@@ -386,6 +386,10 @@ msgstr "%s を作成中"
 msgid "Creating the container"
 msgstr "コンテナを作成中"
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -537,7 +541,7 @@ msgstr "証明書のフィンガープリント: %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "強制的に擬似端末を割り当てます"
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -731,8 +735,8 @@ msgstr "ディレクトリからのインポートは root で実行する必要
 msgid "Must supply container name for: "
 msgstr "コンテナ名を指定する必要があります: "
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -788,7 +792,7 @@ msgstr "このストレージボリュームに対するデバイスがありま
 msgid "No fingerprint specified."
 msgstr "フィンガープリントが指定されていません。"
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "プロファイル %s が %s から削除されました"
@@ -1019,7 +1023,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1297,7 +1301,7 @@ msgstr "タイプ: persistent"
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1334,7 +1338,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "使い方: lxc <コマンド> [オプション]"
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/lxd.pot b/po/lxd.pot
index 06be2f45c..67870d594 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -7,7 +7,7 @@
 msgid   ""
 msgstr  "Project-Id-Version: lxd\n"
         "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-        "POT-Creation-Date: 2017-12-04 08:56+0000\n"
+        "POT-Creation-Date: 2017-12-04 08:57+0000\n"
         "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
         "Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
         "Language-Team: LANGUAGE <LL at li.org>\n"
@@ -372,6 +372,10 @@ msgstr  ""
 msgid   "Creating the container"
 msgstr  ""
 
+#: lxc/cluster.go:120
+msgid   "DATABASE"
+msgstr  ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525 lxc/storage.go:682 lxc/storage.go:793
 msgid   "DESCRIPTION"
 msgstr  ""
@@ -520,7 +524,7 @@ msgstr  ""
 msgid   "Force pseudo-terminal allocation"
 msgstr  ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid   "Force removing a node, even if degraded"
 msgstr  ""
 
@@ -711,7 +715,7 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid   "NAME"
 msgstr  ""
 
@@ -767,7 +771,7 @@ msgstr  ""
 msgid   "No fingerprint specified."
 msgstr  ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid   "Node %s removed"
 msgstr  ""
@@ -996,7 +1000,7 @@ msgstr  ""
 msgid   "SOURCE"
 msgstr  ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid   "STATE"
 msgstr  ""
 
@@ -1256,7 +1260,7 @@ msgstr  ""
 msgid   "UPLOAD DATE"
 msgstr  ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid   "URL"
 msgstr  ""
 
@@ -1289,7 +1293,7 @@ msgstr  ""
 msgid   "Usage: lxc <command> [options]"
 msgstr  ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid   "Usage: lxc cluster <subcommand> [options]\n"
         "\n"
         "Manage cluster nodes.\n"
diff --git a/po/nb_NO.po b/po/nb_NO.po
index d877d8268..8cdddde61 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,6 +380,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -529,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -720,8 +724,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1007,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1271,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1305,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/nl.po b/po/nl.po
index e5030ceb9..6dc8d124f 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,6 +380,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -529,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -720,8 +724,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1007,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1271,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1305,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/pt_BR.po b/po/pt_BR.po
index dc636a211..eeae2cafa 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,6 +380,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -529,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -720,8 +724,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1007,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1271,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1305,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/ru.po b/po/ru.po
index b98783d35..2745d4c4c 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: 2017-09-05 16:48+0000\n"
 "Last-Translator: Ilya Yakimavets <ilya.yakimavets at backend.expert>\n"
 "Language-Team: Russian <https://hosted.weblate.org/projects/linux-containers/"
@@ -468,6 +468,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -618,7 +622,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -810,8 +814,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -868,7 +872,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1098,7 +1102,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1362,7 +1366,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1399,7 +1403,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/sr.po b/po/sr.po
index 4c6db9898..679adb46d 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,6 +380,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -529,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -720,8 +724,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1007,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1271,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1305,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/sv.po b/po/sv.po
index 26cf26b2f..bb11b1ff6 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,6 +380,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -529,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -720,8 +724,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1007,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1271,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1305,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/tr.po b/po/tr.po
index be6119b9e..10e00075d 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,6 +380,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -529,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -720,8 +724,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1007,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1271,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1305,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/zh.po b/po/zh.po
index 8f0731f0f..1055d5578 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,6 +380,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -529,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -720,8 +724,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1007,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1271,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1305,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index 0546b522d..fb06eb0d7 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:56+0000\n"
+"POT-Creation-Date: 2017-12-04 08:57+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,6 +380,10 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
+#: lxc/cluster.go:120
+msgid "DATABASE"
+msgstr ""
+
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
@@ -529,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:27
+#: lxc/cluster.go:30
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -720,8 +724,8 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409
-#: lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:72
+#: lxc/cluster.go:79
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1007,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/list.go:469
+#: lxc/cluster.go:121 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1271,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/remote.go:410
+#: lxc/cluster.go:119 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1305,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:16
+#: lxc/cluster.go:19
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 61339f650..b82cfde25 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -36,3 +36,11 @@ type RaftNode struct {
 	ID      int64  `json:"id" yaml:"id"`
 	Address string `json:"address" yaml:"address"`
 }
+
+// Node represents a LXD node in the cluster.
+type Node struct {
+	Name     string `json:"name" yaml:"name"`
+	URL      string `json:"url" yaml:"url"`
+	Database bool   `json:"database" yaml:"database"`
+	State    string `json:"state" yaml:"state"`
+}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 119529ed9..b73a0e51a 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -49,10 +49,14 @@ test_clustering() {
   ns5="${prefix}5"
   spawn_lxd_and_join_cluster "${ns5}" "${bridge}" "${cert}" 5 4 "${LXD_FIVE_DIR}"
 
+  # List all nodes
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list | grep -q "ONLINE"
+
   # Shutdown a non-database node, and wait a few seconds so it will be
   # detected as down.
   LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown
-  sleep 5
+  sleep 22
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list | grep "node5" | grep -q "OFFLINE"
 
   # Trying to delete the preseeded network now fails, because a node is degraded.
   ! LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"

From 1a4f9ec2545b8103223087f2b09af9bb260070be Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 7 Nov 2017 09:09:04 +0000
Subject: [PATCH 079/116] Add Gateway.LeaderAddress returning the address of
 the raft leader

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/gateway.go      | 96 +++++++++++++++++++++++++++++++++++++++++++++
 lxd/cluster/gateway_test.go |  8 ++++
 2 files changed, 104 insertions(+)

diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index ae5aa01b9..c0ba5cfff 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -132,6 +132,17 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 			return
 		}
 
+		// Handle leader address requests.
+		if r.Method == "GET" {
+			leader, err := g.LeaderAddress()
+			if err != nil {
+				http.Error(w, "500 no elected leader", http.StatusInternalServerError)
+				return
+			}
+			util.WriteJSON(w, map[string]string{"leader": leader}, false)
+			return
+		}
+
 		g.server.ServeHTTP(w, r)
 	}
 	raft := func(w http.ResponseWriter, r *http.Request) {
@@ -263,6 +274,91 @@ func (g *Gateway) Reset(cert *shared.CertInfo) error {
 	return g.init()
 }
 
+// LeaderAddress returns the address of the current raft leader.
+func (g *Gateway) LeaderAddress() (string, error) {
+	// If we aren't clustered, return an error.
+	if g.memoryDial != nil {
+		return "", fmt.Errorf("node is not clustered")
+	}
+
+	ctx, cancel := context.WithTimeout(g.ctx, 5*time.Second)
+	defer cancel()
+
+	// If this is a raft node, return the address of the current leader, or
+	// wait a bit until one is elected.
+	if g.raft != nil {
+		for ctx.Err() == nil {
+			address := g.raft.Raft().Leader()
+			if address != "" {
+				return string(address), nil
+			}
+		}
+		return "", ctx.Err()
+
+	}
+
+	// If this isn't a raft node, contact a raft node and ask for the
+	// address of the current leader.
+	config, err := tlsClientConfig(g.cert)
+	if err != nil {
+		return "", err
+	}
+	addresses := []string{}
+	err = g.db.Transaction(func(tx *db.NodeTx) error {
+		nodes, err := tx.RaftNodes()
+		if err != nil {
+			return err
+		}
+		for _, node := range nodes {
+			addresses = append(addresses, node.Address)
+		}
+		return nil
+	})
+	if err != nil {
+		return "", errors.Wrap(err, "failed to fetch raft nodes addresses")
+	}
+
+	if len(addresses) == 0 {
+		// This should never happen because the raft_nodes table should
+		// never be empty for a clustered node, but check it for good
+		// measure.
+		return "", fmt.Errorf("no raft node known")
+	}
+
+	for _, address := range addresses {
+		url := fmt.Sprintf("https://%s%s", address, grpcEndpoint)
+		request, err := http.NewRequest("GET", url, nil)
+		if err != nil {
+			return "", err
+		}
+		request = request.WithContext(ctx)
+		client := &http.Client{Transport: &http.Transport{TLSClientConfig: config}}
+		response, err := client.Do(request)
+		if err != nil {
+			logger.Debugf("Failed to fetch leader address from %s", address)
+			continue
+		}
+		if response.StatusCode != http.StatusOK {
+			logger.Debugf("Request for leader address from %s failed", address)
+			continue
+		}
+		info := map[string]string{}
+		err = shared.ReadToJSON(response.Body, &info)
+		if err != nil {
+			logger.Debugf("Failed to parse leader address from %s", address)
+			continue
+		}
+		leader := info["leader"]
+		if leader == "" {
+			logger.Debugf("Raft node %s returned no leader address", address)
+			continue
+		}
+		return leader, nil
+	}
+
+	return "", fmt.Errorf("raft cluster is unavailable")
+}
+
 // Initialize the gateway, creating a new raft factory and gRPC server (if this
 // node is a database node), and a gRPC dialer.
 func (g *Gateway) init() error {
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index 10536978b..3e1e904fa 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -41,6 +41,10 @@ func TestGateway_Single(t *testing.T) {
 	conn, err := dialer()
 	assert.NoError(t, err)
 	assert.NotNil(t, conn)
+
+	leader, err := gateway.LeaderAddress()
+	assert.Equal(t, "", leader)
+	assert.EqualError(t, err, "node is not clustered")
 }
 
 // If there's a network address configured, we expose the gRPC endpoint with
@@ -68,6 +72,10 @@ func TestGateway_SingleWithNetworkAddress(t *testing.T) {
 	conn, err := driver.Open("test.db")
 	require.NoError(t, err)
 	require.NoError(t, conn.Close())
+
+	leader, err := gateway.LeaderAddress()
+	require.NoError(t, err)
+	assert.Equal(t, address, leader)
 }
 
 // When networked, the grpc and raft endpoints requires the cluster

From 0bff96dc68bfb54545d4823bacf255ecc95bd775 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 7 Nov 2017 11:57:11 +0000
Subject: [PATCH 080/116] Redirect to the raft leader all requests to accept a
 new node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go             | 24 ++++++++++++++++++++-
 lxd/cluster/gateway.go         | 49 ++++++++++++++++++++++++++----------------
 lxd/cluster/gateway_test.go    |  7 ++++++
 lxd/cluster/heartbeat_test.go  |  2 +-
 lxd/cluster/membership.go      | 31 ++++++++++++--------------
 lxd/cluster/membership_test.go | 34 ++++++++++-------------------
 lxd/response.go                | 11 +++++++++-
 test/suites/clustering.sh      | 10 +++++++--
 8 files changed, 104 insertions(+), 64 deletions(-)

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index e31ac010e..f8cd22960 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"net/url"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -16,6 +17,7 @@ import (
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/version"
 	"github.com/pkg/errors"
 )
@@ -164,6 +166,26 @@ func clusterNodesPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 }
 
 func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
+	// Redirect all requests to the leader, which is the one that
+	// knows what nodes are part of the raft cluster.
+	address, err := node.HTTPSAddress(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+	leader, err := d.gateway.LeaderAddress()
+	if err != nil {
+		return InternalError(err)
+	}
+	if address != leader {
+		logger.Debugf("Redirect node accept request to %s", leader)
+		url := &url.URL{
+			Scheme: "https",
+			Path:   "/1.0/cluster/nodes",
+			Host:   leader,
+		}
+		return SyncResponseRedirect(url.String())
+	}
+
 	// Accepting a node requires the client to provide the correct
 	// trust password.
 	secret, err := cluster.ConfigGetString(d.cluster, "core.trust_password")
@@ -173,7 +195,7 @@ func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
 	if util.PasswordCheck(secret, req.TargetPassword) != nil {
 		return Forbidden
 	}
-	nodes, err := cluster.Accept(d.State(), req.Name, req.Address, req.Schema, req.API)
+	nodes, err := cluster.Accept(d.State(), d.gateway, req.Name, req.Address, req.Schema, req.API)
 	if err != nil {
 		return BadRequest(err)
 	}
diff --git a/lxd/cluster/gateway.go b/lxd/cluster/gateway.go
index c0ba5cfff..ceec8f5d9 100644
--- a/lxd/cluster/gateway.go
+++ b/lxd/cluster/gateway.go
@@ -93,27 +93,11 @@ type Gateway struct {
 // database node part of the dqlite cluster.
 func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 	grpc := func(w http.ResponseWriter, r *http.Request) {
-		if g.server == nil || g.memoryDial != nil {
-			http.NotFound(w, r)
-			return
-		}
-
 		if !tlsCheckCert(r, g.cert) {
 			http.Error(w, "403 invalid client certificate", http.StatusForbidden)
 			return
 		}
 
-		// Before actually establishing the gRPC SQL connection, our
-		// dialer probes the node to see if it's currently the leader
-		// (otherwise it tries with another node or retry later).
-		if r.Method == "HEAD" {
-			if g.raft.Raft().State() != raft.Leader {
-				http.Error(w, "503 not leader", http.StatusServiceUnavailable)
-				return
-			}
-			return
-		}
-
 		// Handle heatbeats.
 		if r.Method == "PUT" {
 			var nodes []db.RaftNode
@@ -132,6 +116,23 @@ func (g *Gateway) HandlerFuncs() map[string]http.HandlerFunc {
 			return
 		}
 
+		// From here on we require that this node is part of the raft cluster.
+		if g.server == nil || g.memoryDial != nil {
+			http.NotFound(w, r)
+			return
+		}
+
+		// Before actually establishing the gRPC SQL connection, our
+		// dialer probes the node to see if it's currently the leader
+		// (otherwise it tries with another node or retry later).
+		if r.Method == "HEAD" {
+			if g.raft.Raft().State() != raft.Leader {
+				http.Error(w, "503 not leader", http.StatusServiceUnavailable)
+				return
+			}
+			return
+		}
+
 		// Handle leader address requests.
 		if r.Method == "GET" {
 			leader, err := g.LeaderAddress()
@@ -288,10 +289,11 @@ func (g *Gateway) LeaderAddress() (string, error) {
 	// wait a bit until one is elected.
 	if g.raft != nil {
 		for ctx.Err() == nil {
-			address := g.raft.Raft().Leader()
+			address := string(g.raft.Raft().Leader())
 			if address != "" {
-				return string(address), nil
+				return address, nil
 			}
+			time.Sleep(time.Second)
 		}
 		return "", ctx.Err()
 
@@ -388,6 +390,9 @@ func (g *Gateway) init() error {
 
 		g.server = server
 		g.raft = raft
+	} else {
+		g.server = nil
+		g.raft = nil
 	}
 	return nil
 }
@@ -420,7 +425,13 @@ func (g *Gateway) currentRaftNodes() ([]db.RaftNode, error) {
 	for i, server := range servers {
 		address, err := provider.ServerAddr(server.ID)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to fetch raft server address")
+			if err != db.NoSuchObjectError {
+				return nil, errors.Wrap(err, "failed to fetch raft server address")
+			}
+			// Use the initial address as fallback. This is an edge
+			// case that happens when a new leader is elected and
+			// its raft_nodes table is not fully up-to-date yet.
+			address = server.Address
 		}
 		id, err := strconv.Atoi(string(server.ID))
 		if err != nil {
diff --git a/lxd/cluster/gateway_test.go b/lxd/cluster/gateway_test.go
index 3e1e904fa..48d074bca 100644
--- a/lxd/cluster/gateway_test.go
+++ b/lxd/cluster/gateway_test.go
@@ -1,6 +1,8 @@
 package cluster_test
 
 import (
+	"crypto/tls"
+	"crypto/x509"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
@@ -31,8 +33,13 @@ func TestGateway_Single(t *testing.T) {
 	handlerFuncs := gateway.HandlerFuncs()
 	assert.Len(t, handlerFuncs, 2)
 	for endpoint, f := range handlerFuncs {
+		c, err := x509.ParseCertificate(cert.KeyPair().Certificate[0])
+		require.NoError(t, err)
 		w := httptest.NewRecorder()
 		r := &http.Request{}
+		r.TLS = &tls.ConnectionState{
+			PeerCertificates: []*x509.Certificate{c},
+		}
 		f(w, r)
 		assert.Equal(t, 404, w.Code, endpoint)
 	}
diff --git a/lxd/cluster/heartbeat_test.go b/lxd/cluster/heartbeat_test.go
index 1e78496f3..b40d4292e 100644
--- a/lxd/cluster/heartbeat_test.go
+++ b/lxd/cluster/heartbeat_test.go
@@ -135,7 +135,7 @@ func (f *heartbeatFixture) Grow() *cluster.Gateway {
 	targetState := f.states[target]
 
 	nodes, err := cluster.Accept(
-		targetState, name, address, cluster.SchemaVersion, len(version.APIExtensions))
+		targetState, target, name, address, cluster.SchemaVersion, len(version.APIExtensions))
 
 	err = cluster.Join(state, gateway, target.Cert(), name, nodes)
 	require.NoError(f.t, err)
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 77e17b74f..ff6540f95 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -136,7 +136,7 @@ func Bootstrap(state *state.State, gateway *Gateway, name string) error {
 //
 // Return an updated list raft database nodes (possibly including the newly
 // accepted node).
-func Accept(state *state.State, name, address string, schema, api int) ([]db.RaftNode, error) {
+func Accept(state *state.State, gateway *Gateway, name, address string, schema, api int) ([]db.RaftNode, error) {
 	// Check parameters
 	if name == "" {
 		return nil, fmt.Errorf("node name must not be empty")
@@ -166,25 +166,22 @@ func Accept(state *state.State, name, address string, schema, api int) ([]db.Raf
 
 	// Possibly insert the new node into the raft_nodes table (if we have
 	// less than 3 database nodes).
-	var nodes []db.RaftNode
-	err = state.Node.Transaction(func(tx *db.NodeTx) error {
-		var err error
-		nodes, err = tx.RaftNodes()
-		if err != nil {
-			return errors.Wrap(err, "failed to fetch current raft nodes")
-		}
-		if len(nodes) >= membershipMaxRaftNodes {
+	nodes, err := gateway.currentRaftNodes()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get raft nodes from the log")
+	}
+	if len(nodes) < membershipMaxRaftNodes {
+		err = state.Node.Transaction(func(tx *db.NodeTx) error {
+			id, err := tx.RaftNodeAdd(address)
+			if err != nil {
+				return err
+			}
+			nodes = append(nodes, db.RaftNode{ID: id, Address: address})
 			return nil
-		}
-		id, err := tx.RaftNodeAdd(address)
+		})
 		if err != nil {
-			return err
+			return nil, errors.Wrap(err, "failed to insert new node into raft_nodes")
 		}
-		nodes = append(nodes, db.RaftNode{ID: id, Address: address})
-		return nil
-	})
-	if err != nil {
-		return nil, err
 	}
 
 	return nodes, nil
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index bfa5cce8f..b454e7824 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -193,9 +193,13 @@ func TestAccept_UnmetPreconditions(t *testing.T) {
 			state, cleanup := state.NewTestState(t)
 			defer cleanup()
 
+			cert := shared.TestingKeyPair()
+			gateway := newGateway(t, state.Node, cert)
+			defer gateway.Shutdown()
+
 			c.setup(&membershipFixtures{t: t, state: state})
 
-			_, err := cluster.Accept(state, c.name, c.address, c.schema, c.api)
+			_, err := cluster.Accept(state, gateway, c.name, c.address, c.schema, c.api)
 			assert.EqualError(t, err, c.error)
 		})
 	}
@@ -206,12 +210,16 @@ func TestAccept(t *testing.T) {
 	state, cleanup := state.NewTestState(t)
 	defer cleanup()
 
+	cert := shared.TestingKeyPair()
+	gateway := newGateway(t, state.Node, cert)
+	defer gateway.Shutdown()
+
 	f := &membershipFixtures{t: t, state: state}
 	f.RaftNode("1.2.3.4:666")
 	f.ClusterNode("1.2.3.4:666")
 
 	nodes, err := cluster.Accept(
-		state, "buzz", "5.6.7.8:666", cluster.SchemaVersion, len(version.APIExtensions))
+		state, gateway, "buzz", "5.6.7.8:666", cluster.SchemaVersion, len(version.APIExtensions))
 	assert.NoError(t, err)
 	assert.Len(t, nodes, 2)
 	assert.Equal(t, int64(1), nodes[0].ID)
@@ -220,26 +228,6 @@ func TestAccept(t *testing.T) {
 	assert.Equal(t, "5.6.7.8:666", nodes[1].Address)
 }
 
-// If the cluster has already reached its maximum number of raft nodes, the
-// joining node is not included in the returned raft nodes list.
-func TestAccept_MaxRaftNodes(t *testing.T) {
-	state, cleanup := state.NewTestState(t)
-	defer cleanup()
-
-	f := &membershipFixtures{t: t, state: state}
-	f.RaftNode("1.1.1.1:666")
-	f.RaftNode("2.2.2.2:666")
-	f.RaftNode("3.3.3.3:666")
-	f.ClusterNode("1.2.3.4:666")
-
-	nodes, err := cluster.Accept(
-		state, "buzz", "4.5.6.7:666", cluster.SchemaVersion, len(version.APIExtensions))
-	assert.NoError(t, err)
-	for _, node := range nodes {
-		assert.NotEqual(t, "4.5.6.7:666", node.Address)
-	}
-}
-
 func TestJoin(t *testing.T) {
 	// Setup a target node running as leader of a cluster.
 	targetCert := shared.TestingKeyPair()
@@ -295,7 +283,7 @@ func TestJoin(t *testing.T) {
 
 	// Accept the joining node.
 	raftNodes, err := cluster.Accept(
-		targetState, "rusp", address, cluster.SchemaVersion, len(version.APIExtensions))
+		targetState, targetGateway, "rusp", address, cluster.SchemaVersion, len(version.APIExtensions))
 	require.NoError(t, err)
 
 	// Actually join the cluster.
diff --git a/lxd/response.go b/lxd/response.go
index 41629738e..73d8540c1 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -30,6 +30,7 @@ type syncResponse struct {
 	etag     interface{}
 	metadata interface{}
 	location string
+	code     int
 	headers  map[string]string
 }
 
@@ -56,7 +57,11 @@ func (r *syncResponse) Render(w http.ResponseWriter) error {
 
 	if r.location != "" {
 		w.Header().Set("Location", r.location)
-		w.WriteHeader(201)
+		code := r.code
+		if code == 0 {
+			code = 201
+		}
+		w.WriteHeader(code)
 	}
 
 	resp := api.ResponseRaw{
@@ -90,6 +95,10 @@ func SyncResponseLocation(success bool, metadata interface{}, location string) R
 	return &syncResponse{success: success, metadata: metadata, location: location}
 }
 
+func SyncResponseRedirect(address string) Response {
+	return &syncResponse{success: true, location: address, code: http.StatusPermanentRedirect}
+}
+
 func SyncResponseHeaders(success bool, metadata interface{}, headers map[string]string) Response {
 	return &syncResponse{success: success, metadata: metadata, headers: headers}
 }
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index b73a0e51a..80411bad0 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -49,8 +49,14 @@ test_clustering() {
   ns5="${prefix}5"
   spawn_lxd_and_join_cluster "${ns5}" "${bridge}" "${cert}" 5 4 "${LXD_FIVE_DIR}"
 
-  # List all nodes
-  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list | grep -q "ONLINE"
+  # List all nodes, using clients pointing to different nodes, and
+  # checking which are database nodes and which are not.
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster list | grep "node1" | grep -q "YES"
+  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster list | grep "node2" | grep -q "YES"
+  LXD_DIR="${LXD_ONE_DIR}" lxc cluster list | grep "node3" | grep -q "YES"
+  LXD_DIR="${LXD_TWO_DIR}" lxc cluster list | grep "node4" | grep -q "NO"
+  LXD_DIR="${LXD_FIVE_DIR}" lxc cluster list | grep "node5" | grep -q "NO"
 
   # Shutdown a non-database node, and wait a few seconds so it will be
   # detected as down.

From 0417cf4c11f48fd6b9e8ec0530d0df73994e8851 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 9 Nov 2017 09:07:01 +0000
Subject: [PATCH 081/116] Document new clustering-related public REST APIs

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 doc/api-extensions.md |  13 ++++
 doc/rest-api.md       | 194 ++++++++++++++++++++++++++++++++++++++++++++++++++
 shared/api/cluster.go |   4 +-
 3 files changed, 209 insertions(+), 2 deletions(-)

diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 70e2eb0c0..68ca8afc4 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -370,3 +370,16 @@ This adds support for optimized memory transfer during live migration.
 
 ## infiniband
 This adds support to use infiniband network devices.
+
+## clustering
+Clustering API for LXD.
+
+This includes the following new endpoints:
+
+* `GET /1.0/cluster`
+* `DELETE /1.0/cluster` (see [RESTful API](rest-api.md) for details)
+
+* `GET /1.0/cluster/nodes`
+* `POST /1.0/cluster/nodes` (see [RESTful API](rest-api.md) for details)
+
+* `DELETE /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
diff --git a/doc/rest-api.md b/doc/rest-api.md
index b9249da59..14dc8e5bd 100644
--- a/doc/rest-api.md
+++ b/doc/rest-api.md
@@ -2455,3 +2455,197 @@ Input (none at present):
             }
         }
     }
+## `/1.0/storage-pools`
+### GET
+ * Description: list of storage pools
+ * Introduced: with API extension `storage`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of storage pools that are currently defined on the host
+
+    [
+        "/1.0/storage-pools/default",
+        "/1.0/storage-pools/pool1"
+        "/1.0/storage-pools/pool2"
+        "/1.0/storage-pools/pool3"
+        "/1.0/storage-pools/pool4"
+    ]
+
+### POST
+ * Description: create a new storage pool
+ * Introduced: with API extension `storage`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "config": {
+            "size": "10GB"
+        },
+        "driver": "zfs",
+        "name": "pool1"
+    }
+
+## `/1.0/cluster`
+### GET (optional `?password=<trust-password>`)
+ * Description: information about a cluster (such as networks and storage pools)
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted or untrusted
+ * Operation: sync
+ * Return: dict representing a cluster
+
+    {
+        "type": "sync",
+        "status": "Success",
+        "status_code": 200,
+        "operation": "",
+        "error_code": 0,
+        "error": "",
+        "metadata": {
+            "storage_pools": [
+                {
+                    "name": "default",
+                    "description": "",
+                    "config": {
+                        "source": "/var/lib/lxd/storage-pools/default"
+                    },
+                    "driver": "dir",
+                    "used_by": null
+                }
+            ],
+            "networks": [
+                {
+                    "name": "lxdbr0",
+                    "description": "",
+                    "type": "bridge",
+                    "config": {
+                        "ipv4.address": "10.8.219.1/24",
+                        "ipv4.nat": "true",
+                        "ipv6.address": "fd42:f5a2:e47e:2185::1/64",
+                        "ipv6.nat": "true"
+                    },
+                    "used_by": null,
+                    "managed": true
+                }
+            ]
+    	}
+    }
+
+### DELETE
+ * Description: disable clustering
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (none at present):
+
+    {
+    }
+
+## `/1.0/cluster/nodes`
+### GET
+ * Description: list of LXD nodes in the cluster
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of dicts with information about each node
+
+	{
+		"type": "sync",
+		"status": "Success",
+		"status_code": 200,
+		"operation": "",
+		"error_code": 0,
+		"error": "",
+		"metadata": [
+			{
+				"name": "lxd1",
+				"url": "https://10.1.1.101:8443",
+				"database": true,
+				"state": "ONLINE"
+			},
+			{
+				"name": "lxd2",
+				"url": "https://10.1.1.102:8443",
+				"database": true,
+				"state": "ONLINE"
+			},
+		]
+	} 
+
+### POST
+ * Description: bootstrap, join, or accept a node in the cluster
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted or untrusted
+ * Operation: sync or async
+ * Return: various payloads depending on the input
+
+Input (bootstrap a new cluster):
+
+    {
+		"name": "lxd1",
+	}
+
+Return background operation or standard error.
+
+Input (request to join an existing cluster):
+
+	{
+		"name": "node2",
+		"target_address": "10.1.1.101:8443",
+		"target_cert": "-----BEGIN CERTIFICATE-----MIFf\n-----END CERTIFICATE-----",
+		"target_password": "sekret"
+	}
+
+Return background operation or standard error.
+
+Input (accept a node requesting to join the cluster):
+
+	{
+		"name": "node2",
+		"address": "10.1.1.102:8443",
+		"schema": 2,
+		"api": 63,
+		"target_password": "sekret"
+	}
+
+Return information about raft nodes in the cluster and the private key
+of the cluster certificate:
+
+	{
+		"type": "sync",
+		"status": "Success",
+		"status_code": 200,
+		"operation": "",
+		"error_code": 0,
+		"error": "",
+		"metadata": {
+			"raft_nodes": [
+				{
+					"id": 1,
+					"address": "10.1.1.101:8443"
+				},
+				{
+					"id": 2,
+					"address": "10.1.1.102:8443"
+				}
+			],
+			"private_key": "LS0tLS1CRU"
+		}
+	}
+
+## `/1.0/cluster/nodes/<name>`
+### DELETE (optional `?force=1`)
+ * Description: remove a node from the cluster
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input (none at present):
+
+    {
+    }
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index b82cfde25..5b653e869 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -2,8 +2,8 @@ package api
 
 // Cluster represents high-level information about a LXD cluster.
 type Cluster struct {
-	StoragePools []StoragePool
-	Networks     []Network
+	StoragePools []StoragePool `json:"storage_pools" yaml:"storage_pools"`
+	Networks     []Network     `json:"networks" yaml:"networks"`
 }
 
 // ClusterPost represents the fields required to bootstrap or join a LXD

From dbbf6e8e15defba1dc2d7384a7107317bed215ed Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 9 Nov 2017 11:46:53 +0000
Subject: [PATCH 082/116] Sanity check that cluster notifications use the
 cluster certificate

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/daemon.go | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/lxd/daemon.go b/lxd/daemon.go
index 1f7fc625b..d25a08646 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -119,6 +119,23 @@ type Command struct {
 
 // Check whether the request comes from a trusted client.
 func (d *Daemon) checkTrustedClient(r *http.Request) error {
+	// Check the cluster certificate first, so we return an error if the
+	// notification header is set but the client is not presenting the
+	// cluster certificate (iow this request does not appear to come from a
+	// cluster node).
+	cert, _ := x509.ParseCertificate(d.endpoints.NetworkCert().KeyPair().Certificate[0])
+	clusterCerts := []x509.Certificate{*cert}
+	if r.TLS != nil {
+		for i := range r.TLS.PeerCertificates {
+			if util.CheckTrustState(*r.TLS.PeerCertificates[i], clusterCerts) {
+				return nil
+			}
+		}
+	}
+	if isClusterNotification(r) {
+		return fmt.Errorf("cluster notification not using cluster certificate")
+	}
+
 	if r.RemoteAddr == "@" {
 		// Unix socket
 		return nil
@@ -137,13 +154,8 @@ func (d *Daemon) checkTrustedClient(r *http.Request) error {
 		return err
 	}
 
-	// Add the server or cluster certificate to the list of trusted ones.
-	cert, _ := x509.ParseCertificate(d.endpoints.NetworkCert().KeyPair().Certificate[0])
-	certs := d.clientCerts
-	certs = append(certs, *cert)
-
 	for i := range r.TLS.PeerCertificates {
-		if util.CheckTrustState(*r.TLS.PeerCertificates[i], certs) {
+		if util.CheckTrustState(*r.TLS.PeerCertificates[i], d.clientCerts) {
 			return nil
 		}
 	}

From 530d6c6e68bd028cb34ddbf3ff4e4b14696d4766 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 13 Nov 2017 13:10:50 +0000
Subject: [PATCH 083/116] Add GET node rest API

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go    |  1 +
 client/lxd_cluster.go   | 13 +++++++++++++
 doc/api-extensions.md   |  1 +
 doc/rest-api.md         | 25 +++++++++++++++++++++++++
 lxd/api_cluster.go      | 43 ++++++++++++++++++++++++++++++++++++++++++-
 lxd/api_cluster_test.go |  5 +++++
 6 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 6b40d887e..c45c09e3b 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -167,6 +167,7 @@ type ContainerServer interface {
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
 	LeaveCluster(name string, force bool) (op *Operation, err error)
 	GetNodes() (nodes []api.Node, err error)
+	GetNode(name string) (node *api.Node, err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 5d702459c..1bb302dc9 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -97,3 +97,16 @@ func (r *ProtocolLXD) GetNodes() ([]api.Node, error) {
 
 	return nodes, nil
 }
+
+// GetNode returns information about the given node.
+func (r *ProtocolLXD) GetNode(name string) (*api.Node, error) {
+	node := api.Node{}
+	path := fmt.Sprintf("/cluster/nodes/%s", name)
+	_, err := r.queryStruct("GET", path, nil, "", &node)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &node, nil
+}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 68ca8afc4..49744fb05 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -382,4 +382,5 @@ This includes the following new endpoints:
 * `GET /1.0/cluster/nodes`
 * `POST /1.0/cluster/nodes` (see [RESTful API](rest-api.md) for details)
 
+* `GET /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
 * `DELETE /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
diff --git a/doc/rest-api.md b/doc/rest-api.md
index 14dc8e5bd..4010278d1 100644
--- a/doc/rest-api.md
+++ b/doc/rest-api.md
@@ -2638,6 +2638,31 @@ of the cluster certificate:
 	}
 
 ## `/1.0/cluster/nodes/<name>`
+### GET
+ * Description: retrieve the node's information and status
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing the node
+
+    {
+        "type": "sync",
+        "status": "Success",
+        "status_code": 200,
+        "error_code": 0,
+        "error": "",
+        "metadata": {
+            "type": "custom",
+            "used_by": [],
+            "name": "vol1",
+            "config": {
+                "block.filesystem": "ext4",
+                "block.mount_options": "discard",
+                "size": "10737418240"
+            }
+        }
+    }
+
 ### DELETE (optional `?force=1`)
  * Description: remove a node from the cluster
  * Introduced: with API extension `clustering`
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index f8cd22960..24e3776bd 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -296,7 +296,48 @@ func clusterNodesGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, nodes)
 }
 
-var clusterNodeCmd = Command{name: "cluster/nodes/{name}", delete: clusterNodeDelete}
+var clusterNodeCmd = Command{
+	name:   "cluster/nodes/{name}",
+	get:    clusterNodeGet,
+	delete: clusterNodeDelete,
+}
+
+func clusterNodeGet(d *Daemon, r *http.Request) Response {
+	name := mux.Vars(r)["name"]
+	node := api.Node{Name: name}
+	address := ""
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		dbNode, err := tx.NodeByName(name)
+		if err != nil {
+			return err
+		}
+		address = dbNode.Address
+		node.URL = fmt.Sprintf("https://%s", dbNode.Address)
+		if dbNode.IsDown() {
+			node.State = "OFFLINE"
+		} else {
+			node.State = "ONLINE"
+		}
+		return nil
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
+	// Figure out if this node is currently a database node.
+	err = d.db.Transaction(func(tx *db.NodeTx) error {
+		addresses, err := tx.RaftNodeAddresses()
+		if err != nil {
+			return err
+		}
+		if shared.StringInSlice(address, addresses) {
+			node.Database = true
+		}
+		return nil
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
+	return SyncResponse(true, node)
+}
 
 func clusterNodeDelete(d *Daemon, r *http.Request) Response {
 	force, err := strconv.Atoi(r.FormValue("force"))
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 1c700ca94..8d7c3a944 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -89,6 +89,11 @@ func TestCluster_Join(t *testing.T) {
 	assert.Equal(t, "rusp", nodes[1].Name)
 	assert.Equal(t, "ONLINE", nodes[0].State)
 	assert.Equal(t, "ONLINE", nodes[1].State)
+
+	// The GetNode method returns the requested node.
+	node, err := client.GetNode("buzz")
+	require.NoError(t, err)
+	assert.Equal(t, "buzz", node.Name)
 }
 
 // If the wrong trust password is given, the join request fails.

From e975c58879aff0a5f7f3416b850c9dca9c9c5abc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 14 Nov 2017 08:52:53 +0000
Subject: [PATCH 084/116] Rename lxc cluster remove to lxc cluster delete

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/cluster.go            | 14 ++++++++------
 po/de.po                  | 20 +++++++++++---------
 po/el.po                  | 20 +++++++++++---------
 po/fr.po                  | 20 +++++++++++---------
 po/id.po                  | 20 +++++++++++---------
 po/it.po                  | 20 +++++++++++---------
 po/ja.po                  | 20 +++++++++++---------
 po/lxd.pot                | 20 +++++++++++---------
 po/nb_NO.po               | 20 +++++++++++---------
 po/nl.po                  | 20 +++++++++++---------
 po/pt_BR.po               | 20 +++++++++++---------
 po/ru.po                  | 20 +++++++++++---------
 po/sr.po                  | 20 +++++++++++---------
 po/sv.po                  | 20 +++++++++++---------
 po/tr.po                  | 20 +++++++++++---------
 po/zh.po                  | 20 +++++++++++---------
 po/zh_Hans.po             | 20 +++++++++++---------
 test/suites/clustering.sh |  4 ++--
 18 files changed, 186 insertions(+), 152 deletions(-)

diff --git a/lxc/cluster.go b/lxc/cluster.go
index 8cac3c4fa..f913cba20 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -21,9 +21,11 @@ func (c *clusterCmd) usage() string {
 
 Manage cluster nodes.
 
-*Cluster nodes*
-lxc cluster remove <node> [--force]
-    Remove a node from the cluster.`)
+lxc cluster list [<remote>:]
+    List all nodes in the cluster.
+
+lxc cluster delete [<remote>:]<node> [--force]
+    Delete a node from the cluster.`)
 }
 
 func (c *clusterCmd) flags() {
@@ -43,14 +45,14 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 		return c.doClusterList(conf, args)
 	}
 
-	if args[0] == "remove" {
-		return c.doClusterNodeRemove(conf, args)
+	if args[0] == "delete" {
+		return c.doClusterNodeDelete(conf, args)
 	}
 
 	return nil
 }
 
-func (c *clusterCmd) doClusterNodeRemove(conf *config.Config, args []string) error {
+func (c *clusterCmd) doClusterNodeDelete(conf *config.Config, args []string) error {
 	if len(args) < 2 {
 		return errArgs
 	}
diff --git a/po/de.po b/po/de.po
index 5eb520041..6c56a87dc 100644
--- a/po/de.po
+++ b/po/de.po
@@ -498,7 +498,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -658,7 +658,7 @@ msgstr "Fingerabdruck: %s\n"
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -857,7 +857,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "der Name des Ursprung Containers muss angegeben werden"
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -918,7 +918,7 @@ msgstr "Kein Zertifikat für diese Verbindung"
 msgid "No fingerprint specified."
 msgstr "Kein Fingerabdruck angegeben."
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Gerät %s wurde von %s entfernt\n"
@@ -1156,7 +1156,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1432,7 +1432,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1478,9 +1478,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/el.po b/po/el.po
index 099a69a97..cc9830df8 100644
--- a/po/el.po
+++ b/po/el.po
@@ -384,7 +384,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -538,7 +538,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -730,7 +730,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -788,7 +788,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1018,7 +1018,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1282,7 +1282,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1322,9 +1322,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/fr.po b/po/fr.po
index 52ebac8d4..8ea314763 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -485,7 +485,7 @@ msgstr "Création de %s"
 msgid "Creating the container"
 msgstr "Création du conteneur"
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -642,7 +642,7 @@ msgstr "Empreinte : %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "Forcer l'allocation de pseudo-terminal "
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -843,7 +843,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "Vous devez fournir le nom d'un conteneur pour : "
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr "NOM"
@@ -902,7 +902,7 @@ msgstr "Aucun périphérique existant pour ce réseau"
 msgid "No fingerprint specified."
 msgstr "Aucune empreinte n'a été indiquée."
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Profil %s supprimé de %s"
@@ -1136,7 +1136,7 @@ msgstr "INSTANTANÉS"
 msgid "SOURCE"
 msgstr "SOURCE"
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr "ÉTAT"
 
@@ -1417,7 +1417,7 @@ msgstr "Type : persistant"
 msgid "UPLOAD DATE"
 msgstr "DATE DE PUBLICATION"
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr "URL"
 
@@ -1460,9 +1460,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/id.po b/po/id.po
index 23cf51d7f..896139c1e 100644
--- a/po/id.po
+++ b/po/id.po
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1315,9 +1315,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/it.po b/po/it.po
index 7097dc17c..ebd1d8f4f 100644
--- a/po/it.po
+++ b/po/it.po
@@ -405,7 +405,7 @@ msgstr "Creazione di %s in corso"
 msgid "Creating the container"
 msgstr "Creazione del container in corso"
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -559,7 +559,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -750,7 +750,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -807,7 +807,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1037,7 +1037,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1301,7 +1301,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1341,9 +1341,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/ja.po b/po/ja.po
index 57f2af611..3fb512686 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -386,7 +386,7 @@ msgstr "%s を作成中"
 msgid "Creating the container"
 msgstr "コンテナを作成中"
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -541,7 +541,7 @@ msgstr "証明書のフィンガープリント: %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "強制的に擬似端末を割り当てます"
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -735,7 +735,7 @@ msgstr "ディレクトリからのインポートは root で実行する必要
 msgid "Must supply container name for: "
 msgstr "コンテナ名を指定する必要があります: "
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -792,7 +792,7 @@ msgstr "このストレージボリュームに対するデバイスがありま
 msgid "No fingerprint specified."
 msgstr "フィンガープリントが指定されていません。"
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "プロファイル %s が %s から削除されました"
@@ -1023,7 +1023,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1301,7 +1301,7 @@ msgstr "タイプ: persistent"
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1344,9 +1344,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/lxd.pot b/po/lxd.pot
index 67870d594..8c512ac8c 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -372,7 +372,7 @@ msgstr  ""
 msgid   "Creating the container"
 msgstr  ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid   "DATABASE"
 msgstr  ""
 
@@ -524,7 +524,7 @@ msgstr  ""
 msgid   "Force pseudo-terminal allocation"
 msgstr  ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid   "Force removing a node, even if degraded"
 msgstr  ""
 
@@ -715,7 +715,7 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid   "NAME"
 msgstr  ""
 
@@ -771,7 +771,7 @@ msgstr  ""
 msgid   "No fingerprint specified."
 msgstr  ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid   "Node %s removed"
 msgstr  ""
@@ -1000,7 +1000,7 @@ msgstr  ""
 msgid   "SOURCE"
 msgstr  ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid   "STATE"
 msgstr  ""
 
@@ -1260,7 +1260,7 @@ msgstr  ""
 msgid   "UPLOAD DATE"
 msgstr  ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid   "URL"
 msgstr  ""
 
@@ -1298,9 +1298,11 @@ msgid   "Usage: lxc cluster <subcommand> [options]\n"
         "\n"
         "Manage cluster nodes.\n"
         "\n"
-        "*Cluster nodes*\n"
-        "lxc cluster remove <node> [--force]\n"
-        "    Remove a node from the cluster."
+        "lxc cluster list [<remote>:]\n"
+        "    List all nodes in the cluster.\n"
+        "\n"
+        "lxc cluster delete [<remote>:]<node> [--force]\n"
+        "    Delete a node from the cluster."
 msgstr  ""
 
 #: lxc/config.go:85
diff --git a/po/nb_NO.po b/po/nb_NO.po
index 8cdddde61..acaf873c9 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1315,9 +1315,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/nl.po b/po/nl.po
index 6dc8d124f..da95f0702 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1315,9 +1315,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/pt_BR.po b/po/pt_BR.po
index eeae2cafa..750b314d1 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1315,9 +1315,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/ru.po b/po/ru.po
index 2745d4c4c..2c2b2753b 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -468,7 +468,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -622,7 +622,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -814,7 +814,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -872,7 +872,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1102,7 +1102,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1366,7 +1366,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1409,9 +1409,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/sr.po b/po/sr.po
index 679adb46d..12a5693d9 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1315,9 +1315,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/sv.po b/po/sv.po
index bb11b1ff6..b8a41cba3 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1315,9 +1315,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/tr.po b/po/tr.po
index 10e00075d..e7306f707 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1315,9 +1315,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/zh.po b/po/zh.po
index 1055d5578..532febbff 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1315,9 +1315,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index fb06eb0d7..26ad0ee0d 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:120
+#: lxc/cluster.go:122
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:30
+#: lxc/cluster.go:32
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:118 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:79
+#: lxc/cluster.go:81
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/list.go:469
+#: lxc/cluster.go:123 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:119 lxc/remote.go:410
+#: lxc/cluster.go:121 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1315,9 +1315,11 @@ msgid ""
 "\n"
 "Manage cluster nodes.\n"
 "\n"
-"*Cluster nodes*\n"
-"lxc cluster remove <node> [--force]\n"
-"    Remove a node from the cluster."
+"lxc cluster list [<remote>:]\n"
+"    List all nodes in the cluster.\n"
+"\n"
+"lxc cluster delete [<remote>:]<node> [--force]\n"
+"    Delete a node from the cluster."
 msgstr ""
 
 #: lxc/config.go:85
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 80411bad0..5d240394b 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -68,14 +68,14 @@ test_clustering() {
   ! LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
   # Force the removal of the degraded node.
-  LXD_DIR="${LXD_THREE_DIR}" lxc cluster remove node5 --force
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster delete node5 --force
 
   # Now the preseeded network can be deleted, and all nodes are
   # notified.
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
   # Remove a node gracefully.
-  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster remove node4
+  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster delete node4
 
   LXD_DIR="${LXD_FOUR_DIR}" lxd shutdown
   LXD_DIR="${LXD_THREE_DIR}" lxd shutdown

From a796fb8be3f0d9d5796363cc99691abcecfa7486 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 14 Nov 2017 09:03:17 +0000
Subject: [PATCH 085/116] Add lxc cluster show command

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/cluster.go            | 40 ++++++++++++++++++++++++++++++++++++++++
 po/de.po                  | 19 +++++++++++--------
 po/el.po                  | 19 +++++++++++--------
 po/fr.po                  | 19 +++++++++++--------
 po/id.po                  | 19 +++++++++++--------
 po/it.po                  | 19 +++++++++++--------
 po/ja.po                  | 19 +++++++++++--------
 po/lxd.pot                | 19 +++++++++++--------
 po/nb_NO.po               | 19 +++++++++++--------
 po/nl.po                  | 19 +++++++++++--------
 po/pt_BR.po               | 19 +++++++++++--------
 po/ru.po                  | 19 +++++++++++--------
 po/sr.po                  | 19 +++++++++++--------
 po/sv.po                  | 19 +++++++++++--------
 po/tr.po                  | 19 +++++++++++--------
 po/zh.po                  | 19 +++++++++++--------
 po/zh_Hans.po             | 19 +++++++++++--------
 test/suites/clustering.sh |  3 +++
 18 files changed, 219 insertions(+), 128 deletions(-)

diff --git a/lxc/cluster.go b/lxc/cluster.go
index f913cba20..bca261ed6 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -5,6 +5,8 @@ import (
 	"os"
 	"sort"
 
+	yaml "gopkg.in/yaml.v2"
+
 	"github.com/lxc/lxd/lxc/config"
 	"github.com/lxc/lxd/shared/gnuflag"
 	"github.com/lxc/lxd/shared/i18n"
@@ -24,6 +26,9 @@ Manage cluster nodes.
 lxc cluster list [<remote>:]
     List all nodes in the cluster.
 
+lxc cluster show [<remote>:]<node>
+    Show details of a node.
+
 lxc cluster delete [<remote>:]<node> [--force]
     Delete a node from the cluster.`)
 }
@@ -45,6 +50,10 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 		return c.doClusterList(conf, args)
 	}
 
+	if args[0] == "show" {
+		return c.doClusterNodeShow(conf, args)
+	}
+
 	if args[0] == "delete" {
 		return c.doClusterNodeDelete(conf, args)
 	}
@@ -52,6 +61,37 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 	return nil
 }
 
+func (c *clusterCmd) doClusterNodeShow(conf *config.Config, args []string) error {
+	if len(args) < 2 {
+		return errArgs
+	}
+
+	// [[lxc cluster]] show production:bionic-1
+	remote, name, err := conf.ParseRemote(args[1])
+	if err != nil {
+		return err
+	}
+
+	client, err := conf.GetContainerServer(remote)
+	if err != nil {
+		return err
+	}
+
+	node, err := client.GetNode(name)
+	if err != nil {
+		return err
+	}
+
+	data, err := yaml.Marshal(&node)
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf("%s", data)
+
+	return nil
+}
+
 func (c *clusterCmd) doClusterNodeDelete(conf *config.Config, args []string) error {
 	if len(args) < 2 {
 		return errArgs
diff --git a/po/de.po b/po/de.po
index 6c56a87dc..bc095d70e 100644
--- a/po/de.po
+++ b/po/de.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: 2017-02-14 17:11+0000\n"
 "Last-Translator: Tim Rose <tim at netlope.de>\n"
 "Language-Team: German <https://hosted.weblate.org/projects/linux-containers/"
@@ -498,7 +498,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -658,7 +658,7 @@ msgstr "Fingerabdruck: %s\n"
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -857,7 +857,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "der Name des Ursprung Containers muss angegeben werden"
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -918,7 +918,7 @@ msgstr "Kein Zertifikat für diese Verbindung"
 msgid "No fingerprint specified."
 msgstr "Kein Fingerabdruck angegeben."
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Gerät %s wurde von %s entfernt\n"
@@ -1156,7 +1156,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1432,7 +1432,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1472,7 +1472,7 @@ msgstr ""
 "Benutzung: lxc [Unterbefehl] [Optionen]\n"
 "Verfügbare Befehle:\n"
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1481,6 +1481,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/el.po b/po/el.po
index cc9830df8..641184cdc 100644
--- a/po/el.po
+++ b/po/el.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: 2017-02-14 08:00+0000\n"
 "Last-Translator: Simos Xenitellis <simos.65 at gmail.com>\n"
 "Language-Team: Greek <https://hosted.weblate.org/projects/linux-containers/"
@@ -384,7 +384,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -538,7 +538,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -730,7 +730,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -788,7 +788,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1018,7 +1018,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1282,7 +1282,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1316,7 +1316,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1325,6 +1325,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/fr.po b/po/fr.po
index 8ea314763..9daeaeea9 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: 2017-10-26 15:46+0000\n"
 "Last-Translator: Alban Vidal <alban.vidal at zordhak.fr>\n"
 "Language-Team: French <https://hosted.weblate.org/projects/linux-containers/"
@@ -485,7 +485,7 @@ msgstr "Création de %s"
 msgid "Creating the container"
 msgstr "Création du conteneur"
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -642,7 +642,7 @@ msgstr "Empreinte : %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "Forcer l'allocation de pseudo-terminal "
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -843,7 +843,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "Vous devez fournir le nom d'un conteneur pour : "
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr "NOM"
@@ -902,7 +902,7 @@ msgstr "Aucun périphérique existant pour ce réseau"
 msgid "No fingerprint specified."
 msgstr "Aucune empreinte n'a été indiquée."
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Profil %s supprimé de %s"
@@ -1136,7 +1136,7 @@ msgstr "INSTANTANÉS"
 msgid "SOURCE"
 msgstr "SOURCE"
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr "ÉTAT"
 
@@ -1417,7 +1417,7 @@ msgstr "Type : persistant"
 msgid "UPLOAD DATE"
 msgstr "DATE DE PUBLICATION"
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr "URL"
 
@@ -1454,7 +1454,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "Utilisation : lxc <commande> [options]"
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1463,6 +1463,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/id.po b/po/id.po
index 896139c1e..3b1ff22db 100644
--- a/po/id.po
+++ b/po/id.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1309,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1318,6 +1318,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/it.po b/po/it.po
index ebd1d8f4f..376f7a4f6 100644
--- a/po/it.po
+++ b/po/it.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: 2017-08-18 14:22+0000\n"
 "Last-Translator: Alberto Donato <alberto.donato at gmail.com>\n"
 "Language-Team: Italian <https://hosted.weblate.org/projects/linux-containers/"
@@ -405,7 +405,7 @@ msgstr "Creazione di %s in corso"
 msgid "Creating the container"
 msgstr "Creazione del container in corso"
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -559,7 +559,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -750,7 +750,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -807,7 +807,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1037,7 +1037,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1301,7 +1301,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1335,7 +1335,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1344,6 +1344,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/ja.po b/po/ja.po
index 3fb512686..dc7298417 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: 2017-09-28 20:29+0000\n"
 "Last-Translator: KATOH Yasufumi <karma at jazz.email.ne.jp>\n"
 "Language-Team: Japanese <https://hosted.weblate.org/projects/linux-"
@@ -386,7 +386,7 @@ msgstr "%s を作成中"
 msgid "Creating the container"
 msgstr "コンテナを作成中"
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -541,7 +541,7 @@ msgstr "証明書のフィンガープリント: %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "強制的に擬似端末を割り当てます"
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -735,7 +735,7 @@ msgstr "ディレクトリからのインポートは root で実行する必要
 msgid "Must supply container name for: "
 msgstr "コンテナ名を指定する必要があります: "
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -792,7 +792,7 @@ msgstr "このストレージボリュームに対するデバイスがありま
 msgid "No fingerprint specified."
 msgstr "フィンガープリントが指定されていません。"
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "プロファイル %s が %s から削除されました"
@@ -1023,7 +1023,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1301,7 +1301,7 @@ msgstr "タイプ: persistent"
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1338,7 +1338,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "使い方: lxc <コマンド> [オプション]"
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1347,6 +1347,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/lxd.pot b/po/lxd.pot
index 8c512ac8c..07a8722e8 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -7,7 +7,7 @@
 msgid   ""
 msgstr  "Project-Id-Version: lxd\n"
         "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-        "POT-Creation-Date: 2017-12-04 08:57+0000\n"
+        "POT-Creation-Date: 2017-12-04 08:58+0000\n"
         "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
         "Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
         "Language-Team: LANGUAGE <LL at li.org>\n"
@@ -372,7 +372,7 @@ msgstr  ""
 msgid   "Creating the container"
 msgstr  ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid   "DATABASE"
 msgstr  ""
 
@@ -524,7 +524,7 @@ msgstr  ""
 msgid   "Force pseudo-terminal allocation"
 msgstr  ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid   "Force removing a node, even if degraded"
 msgstr  ""
 
@@ -715,7 +715,7 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid   "NAME"
 msgstr  ""
 
@@ -771,7 +771,7 @@ msgstr  ""
 msgid   "No fingerprint specified."
 msgstr  ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid   "Node %s removed"
 msgstr  ""
@@ -1000,7 +1000,7 @@ msgstr  ""
 msgid   "SOURCE"
 msgstr  ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid   "STATE"
 msgstr  ""
 
@@ -1260,7 +1260,7 @@ msgstr  ""
 msgid   "UPLOAD DATE"
 msgstr  ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid   "URL"
 msgstr  ""
 
@@ -1293,7 +1293,7 @@ msgstr  ""
 msgid   "Usage: lxc <command> [options]"
 msgstr  ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid   "Usage: lxc cluster <subcommand> [options]\n"
         "\n"
         "Manage cluster nodes.\n"
@@ -1301,6 +1301,9 @@ msgid   "Usage: lxc cluster <subcommand> [options]\n"
         "lxc cluster list [<remote>:]\n"
         "    List all nodes in the cluster.\n"
         "\n"
+        "lxc cluster show [<remote>:]<node>\n"
+        "    Show details of a node.\n"
+        "\n"
         "lxc cluster delete [<remote>:]<node> [--force]\n"
         "    Delete a node from the cluster."
 msgstr  ""
diff --git a/po/nb_NO.po b/po/nb_NO.po
index acaf873c9..f2c794bce 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1309,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1318,6 +1318,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/nl.po b/po/nl.po
index da95f0702..a0a3c907e 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1309,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1318,6 +1318,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/pt_BR.po b/po/pt_BR.po
index 750b314d1..05aac9071 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1309,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1318,6 +1318,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/ru.po b/po/ru.po
index 2c2b2753b..bde82c0cf 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: 2017-09-05 16:48+0000\n"
 "Last-Translator: Ilya Yakimavets <ilya.yakimavets at backend.expert>\n"
 "Language-Team: Russian <https://hosted.weblate.org/projects/linux-containers/"
@@ -468,7 +468,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -622,7 +622,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -814,7 +814,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -872,7 +872,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1102,7 +1102,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1366,7 +1366,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1403,7 +1403,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1412,6 +1412,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/sr.po b/po/sr.po
index 12a5693d9..038d6a89f 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1309,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1318,6 +1318,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/sv.po b/po/sv.po
index b8a41cba3..e5d9d18b3 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1309,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1318,6 +1318,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/tr.po b/po/tr.po
index e7306f707..78592d739 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1309,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1318,6 +1318,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/zh.po b/po/zh.po
index 532febbff..8e8a67f3b 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1309,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1318,6 +1318,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index 26ad0ee0d..835c5870b 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:57+0000\n"
+"POT-Creation-Date: 2017-12-04 08:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:122
+#: lxc/cluster.go:162
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:32
+#: lxc/cluster.go:37
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:120 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:81
+#: lxc/cluster.go:121
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:123 lxc/list.go:469
+#: lxc/cluster.go:163 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:121 lxc/remote.go:410
+#: lxc/cluster.go:161 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1309,7 +1309,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:19
+#: lxc/cluster.go:21
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1318,6 +1318,9 @@ msgid ""
 "lxc cluster list [<remote>:]\n"
 "    List all nodes in the cluster.\n"
 "\n"
+"lxc cluster show [<remote>:]<node>\n"
+"    Show details of a node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 5d240394b..d0ecc5f67 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -58,6 +58,9 @@ test_clustering() {
   LXD_DIR="${LXD_TWO_DIR}" lxc cluster list | grep "node4" | grep -q "NO"
   LXD_DIR="${LXD_FIVE_DIR}" lxc cluster list | grep "node5" | grep -q "NO"
 
+  # Show a single node
+  LXD_DIR="${LXD_TWO_DIR}" lxc cluster show node5 | grep -q "node5"
+
   # Shutdown a non-database node, and wait a few seconds so it will be
   # detected as down.
   LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown

From d28ff7889675ae8b656c889497409f490714bce6 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 14 Nov 2017 10:22:59 +0000
Subject: [PATCH 086/116] Notify new client certificates

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/certificates.go       | 29 ++++++++++++++++++++++++++---
 test/suites/clustering.sh |  5 +++++
 2 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/lxd/certificates.go b/lxd/certificates.go
index e51e6a88f..c2420a381 100644
--- a/lxd/certificates.go
+++ b/lxd/certificates.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/gorilla/mux"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
@@ -148,9 +149,31 @@ func certificatesPost(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	err = saveCert(d.cluster, name, cert)
-	if err != nil {
-		return SmartError(err)
+	if !isClusterNotification(r) {
+		// Store the certificate in the cluster database.
+		err = saveCert(d.cluster, name, cert)
+		if err != nil {
+			return SmartError(err)
+		}
+
+		// Notify other nodes about the new certificate.
+		notifier, err := cluster.NewNotifier(
+			d.State(), d.endpoints.NetworkCert(), cluster.NotifyAlive)
+		if err != nil {
+			return SmartError(err)
+		}
+		req := api.CertificatesPost{
+			Certificate: base64.StdEncoding.EncodeToString(cert.Raw),
+		}
+		req.Name = name
+		req.Type = "client"
+
+		err = notifier(func(client lxd.ContainerServer) error {
+			return client.CreateCertificate(req)
+		})
+		if err != nil {
+			return SmartError(err)
+		}
 	}
 
 	d.clientCerts = append(d.clientCerts, *cert)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index d0ecc5f67..108a80e16 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -61,6 +61,11 @@ test_clustering() {
   # Show a single node
   LXD_DIR="${LXD_TWO_DIR}" lxc cluster show node5 | grep -q "node5"
 
+  # Client certificates are shared across all nodes.
+  LXD_DIR="${LXD_ONE_DIR}" lxc remote add cluster 10.1.1.101:8443 --accept-certificate --password=sekret
+  LXD_DIR="${LXD_ONE_DIR}" lxc remote set-url cluster https://10.1.1.102:8443
+  lxc network list cluster: | grep -q "${bridge}"
+
   # Shutdown a non-database node, and wait a few seconds so it will be
   # detected as down.
   LXD_DIR="${LXD_FIVE_DIR}" lxd shutdown

From 9726312cd8ca0711014590f1e651156cf04fbcf3 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 14 Nov 2017 20:52:45 +0000
Subject: [PATCH 087/116] Add operations table

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go     |  2 +-
 lxd/db/cluster/schema.go      |  9 ++++-
 lxd/db/cluster/update.go      | 16 +++++++-
 lxd/db/cluster/update_test.go | 25 +++++++++++++
 lxd/db/containers.go          |  2 +-
 lxd/db/db.go                  | 22 ++++++-----
 lxd/db/images.go              |  2 +-
 lxd/db/networks.go            | 10 ++---
 lxd/db/node.go                |  4 +-
 lxd/db/operations.go          | 87 +++++++++++++++++++++++++++++++++++++++++++
 lxd/db/operations_test.go     | 33 ++++++++++++++++
 lxd/db/storage_pools.go       | 14 +++----
 lxd/db/storage_volumes.go     |  2 +-
 lxd/db/testing.go             |  2 +-
 lxd/db/transaction.go         |  3 +-
 15 files changed, 201 insertions(+), 32 deletions(-)
 create mode 100644 lxd/db/operations.go
 create mode 100644 lxd/db/operations_test.go

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index ff6540f95..bde5f3725 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -299,7 +299,7 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		if err != nil {
 			return errors.Wrap(err, "failed to get ID of joining node")
 		}
-		state.Cluster.ID(node.ID)
+		state.Cluster.NodeID(node.ID)
 
 		// Storage pools.
 		ids, err := tx.StoragePoolIDs()
diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index ef7d93de5..222760113 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -142,6 +142,13 @@ CREATE TABLE nodes (
     UNIQUE (name),
     UNIQUE (address)
 );
+CREATE TABLE operations (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    uuid TEXT NOT NULL,
+    node_id TEXT NOT NULL,
+    UNIQUE (uuid),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 CREATE TABLE profiles (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
@@ -209,5 +216,5 @@ CREATE TABLE storage_volumes_config (
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 
-INSERT INTO schema (version, updated_at) VALUES (2, strftime("%s"))
+INSERT INTO schema (version, updated_at) VALUES (3, strftime("%s"))
 `
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index c41311e6d..92c19aed0 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -25,10 +25,24 @@ var SchemaVersion = len(updates)
 var updates = map[int]schema.Update{
 	1: updateFromV0,
 	2: updateFromV1,
+	3: updateFromV2,
+}
+
+func updateFromV2(tx *sql.Tx) error {
+	stmt := `
+CREATE TABLE operations (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    uuid TEXT NOT NULL,
+    node_id TEXT NOT NULL,
+    UNIQUE (uuid),
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+`
+	_, err := tx.Exec(stmt)
+	return err
 }
 
 func updateFromV1(tx *sql.Tx) error {
-	// config table
 	stmt := `
 CREATE TABLE certificates (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index b0865c245..646886505 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -180,3 +180,28 @@ func testConfigTable(t *testing.T, table string, setup func(db *sql.DB)) {
 	require.NoError(t, err)
 	assert.Equal(t, int64(0), n) // The row was already deleted by the previous query
 }
+
+func TestUpdateFromV2(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(3, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'one', '', '1.1.1.1', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO operations VALUES (1, 'abcd', 1)")
+	require.NoError(t, err)
+
+	// Unique constraint on uuid
+	_, err = db.Exec("INSERT INTO operations VALUES (2, 'abcd', 1)")
+	require.Error(t, err)
+
+	// Cascade delete on node_id
+	_, err = db.Exec("DELETE FROM nodes")
+	require.NoError(t, err)
+	result, err := db.Exec("DELETE FROM operations")
+	require.NoError(t, err)
+	n, err := result.RowsAffected()
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), n)
+}
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 337a432a1..1cd19a33f 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -166,7 +166,7 @@ func (c *Cluster) ContainerCreate(args ContainerArgs) (int, error) {
 		return 0, err
 	}
 	defer stmt.Close()
-	result, err := stmt.Exec(c.id, args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), args.LastUsedDate.Unix(), statefulInt)
+	result, err := stmt.Exec(c.nodeID, args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), args.LastUsedDate.Unix(), statefulInt)
 	if err != nil {
 		tx.Rollback()
 		return 0, err
diff --git a/lxd/db/db.go b/lxd/db/db.go
index 97d1f9173..6bcde122e 100644
--- a/lxd/db/db.go
+++ b/lxd/db/db.go
@@ -140,8 +140,8 @@ func (n *Node) Begin() (*sql.Tx, error) {
 
 // Cluster mediates access to LXD's data stored in the cluster dqlite database.
 type Cluster struct {
-	db *sql.DB // Handle to the cluster dqlite database, gated behind gRPC SQL.
-	id int64   // Node ID of this LXD instance.
+	db     *sql.DB // Handle to the cluster dqlite database, gated behind gRPC SQL.
+	nodeID int64   // Node ID of this LXD instance.
 }
 
 // OpenCluster creates a new Cluster object for interacting with the dqlite
@@ -180,12 +180,12 @@ func OpenCluster(name string, dialer grpcsql.Dialer, address string) (*Cluster,
 		}
 		if len(nodes) == 1 && nodes[0].Address == "0.0.0.0" {
 			// We're not clustered
-			cluster.ID(1)
+			cluster.NodeID(1)
 			return nil
 		}
 		for _, node := range nodes {
 			if node.Address == address {
-				cluster.id = node.ID
+				cluster.nodeID = node.ID
 				return nil
 			}
 		}
@@ -210,7 +210,9 @@ func ForLocalInspection(db *sql.DB) *Cluster {
 // returns no error, all database changes are committed to the cluster database
 // database, otherwise they are rolled back.
 func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
-	clusterTx := &ClusterTx{}
+	clusterTx := &ClusterTx{
+		nodeID: c.nodeID,
+	}
 
 	// FIXME: the retry loop should be configurable.
 	var err error
@@ -229,12 +231,12 @@ func (c *Cluster) Transaction(f func(*ClusterTx) error) error {
 	return err
 }
 
-// ID sets the the node ID associated with this cluster instance. It's used for
+// NodeID sets the node ID associated with this cluster instance. It's used for
 // backward-compatibility of all db-related APIs that were written before
+// clustering and don't accept a node ID, so in those cases we automatically
+// use this value as implicit node ID.
-func (c *Cluster) ID(id int64) {
-	c.id = id
+// clustering and don't accept a node NodeID, so in those cases we automatically
+// use this value as implict node NodeID.
+func (c *Cluster) NodeID(id int64) {
+	c.nodeID = id
 }
 
 // Close the database facade.
diff --git a/lxd/db/images.go b/lxd/db/images.go
index e71ef07ac..d183d253d 100644
--- a/lxd/db/images.go
+++ b/lxd/db/images.go
@@ -523,7 +523,7 @@ func (c *Cluster) ImageInsert(fp string, fname string, sz int64, public bool, au
 
 	}
 
-	_, err = tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", id, c.id)
+	_, err = tx.Exec("INSERT INTO images_nodes(image_id, node_id) VALUES(?, ?)", id, c.nodeID)
 	if err != nil {
 		tx.Rollback()
 		return err
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 193950b2f..3e9107899 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -112,7 +112,7 @@ func (c *Cluster) NetworkGetInterface(devName string) (int64, *api.Network, erro
 	value := ""
 
 	q := "SELECT networks.id, networks.name, networks_config.value FROM networks LEFT JOIN networks_config ON networks.id=networks_config.network_id WHERE networks_config.key=\"bridge.external_interfaces\" AND networks_config.node_id=?"
-	arg1 := []interface{}{c.id}
+	arg1 := []interface{}{c.nodeID}
 	arg2 := []interface{}{id, name, value}
 	result, err := queryScan(c.db, q, arg1, arg2)
 	if err != nil {
@@ -157,7 +157,7 @@ func (c *Cluster) NetworkConfigGet(id int64) (map[string]string, error) {
         FROM networks_config
 		WHERE network_id=?
                 AND node_id=?`
-	inargs := []interface{}{id, c.id}
+	inargs := []interface{}{id, c.nodeID}
 	outfmt := []interface{}{key, value}
 	results, err := queryScan(c.db, query, inargs, outfmt)
 	if err != nil {
@@ -211,7 +211,7 @@ func (c *Cluster) NetworkCreate(name, description string, config map[string]stri
 		return -1, err
 	}
 
-	err = networkConfigAdd(tx, id, c.id, config)
+	err = networkConfigAdd(tx, id, c.nodeID, config)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -242,13 +242,13 @@ func (c *Cluster) NetworkUpdate(name, description string, config map[string]stri
 		return err
 	}
 
-	err = NetworkConfigClear(tx, id, c.id)
+	err = NetworkConfigClear(tx, id, c.nodeID)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = networkConfigAdd(tx, id, c.id, config)
+	err = networkConfigAdd(tx, id, c.nodeID, config)
 	if err != nil {
 		tx.Rollback()
 		return err
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 743a6bd9e..4b42ec1cc 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -165,7 +165,7 @@ func (c *ClusterTx) NodeIsEmpty(id int64) (bool, error) {
 		return false, nil
 	}
 
-	n, err = query.Count(c.tx, "images", "node_id=?", id)
+	n, err = query.Count(c.tx, "images_nodes", "node_id=?", id)
 	if err != nil {
 		return false, errors.Wrapf(err, "failed to get images count for node %d", id)
 	}
@@ -183,7 +183,7 @@ func (c *ClusterTx) NodeClear(id int64) error {
 		return err
 	}
 
-	_, err = c.tx.Exec("DELETE FROM images WHERE node_id=?", id)
+	_, err = c.tx.Exec("DELETE FROM images_nodes WHERE node_id=?", id)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/db/operations.go b/lxd/db/operations.go
new file mode 100644
index 000000000..bc00e9f47
--- /dev/null
+++ b/lxd/db/operations.go
@@ -0,0 +1,87 @@
+package db
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/pkg/errors"
+)
+
+// Operation holds information about a single LXD operation running on a node
+// in the cluster.
+type Operation struct {
+	ID          int64  // Stable database identifier
+	UUID        string // User-visible identifier
+	NodeAddress string // Address of the node the operation is running on
+}
+
+// OperationsUUIDs returns the UUIDs of all operations associated with this
+// node.
+func (c *ClusterTx) OperationsUUIDs() ([]string, error) {
+	stmt := "SELECT uuid FROM operations WHERE node_id=?"
+	return query.SelectStrings(c.tx, stmt, c.nodeID)
+}
+
+// OperationByUUID returns the operation with the given UUID.
+func (c *ClusterTx) OperationByUUID(uuid string) (Operation, error) {
+	null := Operation{}
+	operations, err := c.operations("uuid=?", uuid)
+	if err != nil {
+		return null, err
+	}
+	switch len(operations) {
+	case 0:
+		return null, NoSuchObjectError
+	case 1:
+		return operations[0], nil
+	default:
+		return null, fmt.Errorf("more than one node matches")
+	}
+}
+
+// OperationAdd adds a new operation to the table.
+func (c *ClusterTx) OperationAdd(uuid string) (int64, error) {
+	columns := []string{"uuid", "node_id"}
+	values := []interface{}{uuid, c.nodeID}
+	return query.UpsertObject(c.tx, "operations", columns, values)
+}
+
+// OperationRemove removes the operation with the given UUID.
+func (c *ClusterTx) OperationRemove(uuid string) error {
+	result, err := c.tx.Exec("DELETE FROM operations WHERE uuid=?", uuid)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return fmt.Errorf("query deleted %d rows instead of 1", n)
+	}
+	return nil
+}
+
+// operations returns all operations in the cluster, filtered by the given clause.
+func (c *ClusterTx) operations(where string, args ...interface{}) ([]Operation, error) {
+	operations := []Operation{}
+	dest := func(i int) []interface{} {
+		operations = append(operations, Operation{})
+		return []interface{}{
+			&operations[i].ID,
+			&operations[i].UUID,
+			&operations[i].NodeAddress,
+		}
+	}
+	stmt := `
+SELECT operations.id, uuid, nodes.address FROM operations JOIN nodes ON nodes.id = node_id `
+	if where != "" {
+		stmt += fmt.Sprintf("WHERE %s ", where)
+	}
+	stmt += "ORDER BY operations.id"
+	err := query.SelectObjects(c.tx, dest, stmt, args...)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to fecth operations")
+	}
+	return operations, nil
+}
diff --git a/lxd/db/operations_test.go b/lxd/db/operations_test.go
new file mode 100644
index 000000000..896304bea
--- /dev/null
+++ b/lxd/db/operations_test.go
@@ -0,0 +1,33 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Add, get and remove an operation.
+func TestOperation(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	id, err := tx.OperationAdd("abcd")
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), id)
+
+	operation, err := tx.OperationByUUID("abcd")
+	require.NoError(t, err)
+	assert.Equal(t, id, operation.ID)
+
+	uuids, err := tx.OperationsUUIDs()
+	require.NoError(t, err)
+	assert.Equal(t, []string{"abcd"}, uuids)
+
+	err = tx.OperationRemove("abcd")
+	require.NoError(t, err)
+
+	_, err = tx.OperationByUUID("abcd")
+	assert.Equal(t, db.NoSuchObjectError, err)
+}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index ad2fad60e..a32263e24 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -164,7 +164,7 @@ func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, erro
 func (c *Cluster) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
 	var key, value string
 	query := "SELECT key, value FROM storage_pools_config WHERE storage_pool_id=? AND (node_id=? OR node_id IS NULL)"
-	inargs := []interface{}{poolID, c.id}
+	inargs := []interface{}{poolID, c.nodeID}
 	outargs := []interface{}{key, value}
 
 	results, err := queryScan(c.db, query, inargs, outargs)
@@ -203,7 +203,7 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 		return -1, err
 	}
 
-	err = storagePoolConfigAdd(tx, id, c.id, poolConfig)
+	err = storagePoolConfigAdd(tx, id, c.nodeID, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -264,13 +264,13 @@ func (c *Cluster) StoragePoolUpdate(poolName, description string, poolConfig map
 		return err
 	}
 
-	err = StoragePoolConfigClear(tx, poolID, c.id)
+	err = StoragePoolConfigClear(tx, poolID, c.nodeID)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = storagePoolConfigAdd(tx, poolID, c.id, poolConfig)
+	err = storagePoolConfigAdd(tx, poolID, c.nodeID, poolConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -420,13 +420,13 @@ func (c *Cluster) StoragePoolVolumeUpdate(volumeName string, volumeType int, poo
 		return err
 	}
 
-	err = StorageVolumeConfigClear(tx, volumeID, c.id)
+	err = StorageVolumeConfigClear(tx, volumeID, c.nodeID)
 	if err != nil {
 		tx.Rollback()
 		return err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, c.id, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, c.nodeID, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return err
@@ -497,7 +497,7 @@ func (c *Cluster) StoragePoolVolumeCreate(volumeName, volumeDescription string,
 		return -1, err
 	}
 
-	err = StorageVolumeConfigAdd(tx, volumeID, c.id, volumeConfig)
+	err = StorageVolumeConfigAdd(tx, volumeID, c.nodeID, volumeConfig)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index 95d164234..e40bc11cb 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -12,7 +12,7 @@ import (
 func (c *Cluster) StorageVolumeConfigGet(volumeID int64) (map[string]string, error) {
 	var key, value string
 	query := "SELECT key, value FROM storage_volumes_config WHERE storage_volume_id=? AND node_id=?"
-	inargs := []interface{}{volumeID, c.id}
+	inargs := []interface{}{volumeID, c.nodeID}
 	outargs := []interface{}{key, value}
 
 	results, err := queryScan(c.db, query, inargs, outargs)
diff --git a/lxd/db/testing.go b/lxd/db/testing.go
index 9f819f5b0..0950156a3 100644
--- a/lxd/db/testing.go
+++ b/lxd/db/testing.go
@@ -74,7 +74,7 @@ func NewTestClusterTx(t *testing.T) (*ClusterTx, func()) {
 
 	var err error
 
-	clusterTx := &ClusterTx{}
+	clusterTx := &ClusterTx{nodeID: cluster.nodeID}
 	clusterTx.tx, err = cluster.db.Begin()
 	require.NoError(t, err)
 
diff --git a/lxd/db/transaction.go b/lxd/db/transaction.go
index de30c11f7..8220bf8d5 100644
--- a/lxd/db/transaction.go
+++ b/lxd/db/transaction.go
@@ -25,5 +25,6 @@ func (n *NodeTx) Tx() *sql.Tx {
 // It wraps low-level sql.Tx objects and offers a high-level API to fetch and
 // update data.
 type ClusterTx struct {
-	tx *sql.Tx // Handle to a transaction in the cluster dqlite database.
+	tx     *sql.Tx // Handle to a transaction in the cluster dqlite database.
+	nodeID int64   // Node ID of this LXD instance.
 }

From e1ac25247f44e2fe9b4347a3939be6e195261fec Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 15 Nov 2017 11:20:07 +0000
Subject: [PATCH 088/116] Track operations in the cluster database table

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |  2 +-
 client/lxd_cluster.go     |  9 +++------
 lxc/cluster.go            |  7 +------
 lxd/api_cluster.go        | 14 +++++---------
 lxd/api_cluster_test.go   |  3 +--
 lxd/cluster/membership.go | 17 ++++++++++++++++-
 lxd/container_console.go  |  2 +-
 lxd/container_delete.go   |  2 +-
 lxd/container_exec.go     |  4 ++--
 lxd/container_post.go     |  6 +++---
 lxd/container_put.go      |  2 +-
 lxd/container_snapshot.go | 11 ++++++-----
 lxd/container_state.go    |  2 +-
 lxd/containers_post.go    | 10 +++++-----
 lxd/daemon_images_test.go |  2 +-
 lxd/images.go             |  8 ++++----
 lxd/migrate.go            |  3 ++-
 lxd/operations.go         | 22 +++++++++++++++++++++-
 po/de.po                  | 12 ++++++------
 po/el.po                  | 12 ++++++------
 po/fr.po                  | 12 ++++++------
 po/id.po                  | 12 ++++++------
 po/it.po                  | 12 ++++++------
 po/ja.po                  | 12 ++++++------
 po/lxd.pot                | 12 ++++++------
 po/nb_NO.po               | 12 ++++++------
 po/nl.po                  | 12 ++++++------
 po/pt_BR.po               | 12 ++++++------
 po/ru.po                  | 12 ++++++------
 po/sr.po                  | 12 ++++++------
 po/sv.po                  | 12 ++++++------
 po/tr.po                  | 12 ++++++------
 po/zh.po                  | 12 ++++++------
 po/zh_Hans.po             | 12 ++++++------
 34 files changed, 171 insertions(+), 147 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index c45c09e3b..4ab4a8843 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -165,7 +165,7 @@ type ContainerServer interface {
 	BootstrapCluster(name string) (op *Operation, err error)
 	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
-	LeaveCluster(name string, force bool) (op *Operation, err error)
+	LeaveCluster(name string, force bool) (err error)
 	GetNodes() (nodes []api.Node, err error)
 	GetNode(name string) (node *api.Node, err error)
 
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 1bb302dc9..3c12da1d0 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -72,17 +72,14 @@ func (r *ProtocolLXD) JoinCluster(targetAddress, targetPassword, targetCert, nam
 
 // LeaveCluster makes the given node leave the cluster (gracefully or not,
 // depending on the force flag).
-func (r *ProtocolLXD) LeaveCluster(name string, force bool) (*Operation, error) {
+func (r *ProtocolLXD) LeaveCluster(name string, force bool) error {
 	params := ""
 	if force {
 		params += "?force=1"
 	}
 	url := fmt.Sprintf("/cluster/nodes/%s%s", name, params)
-	op, _, err := r.queryOperation("DELETE", url, nil, "")
-	if err != nil {
-		return nil, err
-	}
-	return op, nil
+	_, err := r.queryStruct("DELETE", url, nil, "", nil)
+	return err
 }
 
 // GetNodes returns the current nodes in the cluster.
diff --git a/lxc/cluster.go b/lxc/cluster.go
index bca261ed6..041bb7c7f 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -108,16 +108,11 @@ func (c *clusterCmd) doClusterNodeDelete(conf *config.Config, args []string) err
 		return err
 	}
 
-	op, err := client.LeaveCluster(name, c.force)
+	err = client.LeaveCluster(name, c.force)
 	if err != nil {
 		return err
 	}
 
-	err = op.Wait()
-	if err != nil {
-		return nil
-	}
-
 	fmt.Printf(i18n.G("Node %s removed")+"\n", name)
 	return nil
 }
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 24e3776bd..30de8bb4d 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -157,7 +157,7 @@ func clusterNodesPostBootstrap(d *Daemon, req api.ClusterPost) Response {
 	resources := map[string][]string{}
 	resources["cluster"] = []string{}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -267,7 +267,7 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 	resources := map[string][]string{}
 	resources["cluster"] = []string{}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -379,13 +379,9 @@ func clusterNodeDelete(d *Daemon, r *http.Request) Response {
 		}
 	}
 
-	resources := map[string][]string{}
-	resources["cluster"] = []string{}
-
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	err = run(nil)
 	if err != nil {
-		return InternalError(err)
+		return SmartError(err)
 	}
-
-	return OperationResponse(op)
+	return EmptySyncResponse
 }
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index 8d7c3a944..a6d18e7cf 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -162,9 +162,8 @@ func TestCluster_Leave(t *testing.T) {
 	f.FormCluster(daemons)
 
 	client := f.ClientUnix(daemons[1])
-	op, err := client.LeaveCluster("rusp-0", false)
+	err := client.LeaveCluster("rusp-0", false)
 	require.NoError(t, err)
-	assert.NoError(t, op.Wait())
 }
 
 // Test helper for cluster-related APIs.
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index bde5f3725..68357c8ba 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -229,9 +229,12 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 
 	// Get the local config keys for the cluster networks. It assumes that
 	// the local storage pools and networks match the cluster networks, if
-	// not an error will be returned.
+	// not an error will be returned. Also get any outstanding operation,
+	// typically there will be just one, created by the POST /cluster/nodes
+	// request which triggered this code.
 	var pools map[string]map[string]string
 	var networks map[string]map[string]string
+	var operations []string
 	err = state.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		pools, err = tx.StoragePoolConfigs()
 		if err != nil {
@@ -241,6 +244,10 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		if err != nil {
 			return err
 		}
+		operations, err = tx.OperationsUUIDs()
+		if err != nil {
+			return err
+		}
 		return nil
 	})
 	if err != nil {
@@ -335,6 +342,14 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 				return errors.Wrap(err, "failed to add joining node's network config")
 			}
 		}
+
+		// Migrate outstanding operations.
+		for _, uuid := range operations {
+			_, err := tx.OperationAdd(uuid)
+			if err != nil {
+				return err
+			}
+		}
 		return nil
 	})
 	if err != nil {
diff --git a/lxd/container_console.go b/lxd/container_console.go
index 07ef18b4f..f31a57043 100644
--- a/lxd/container_console.go
+++ b/lxd/container_console.go
@@ -310,7 +310,7 @@ func containerConsolePost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{ws.container.Name()}
 
-	op, err := operationCreate(operationClassWebsocket, resources,
+	op, err := operationCreate(d.cluster, operationClassWebsocket, resources,
 		ws.Metadata(), ws.Do, nil, ws.Connect)
 	if err != nil {
 		return InternalError(err)
diff --git a/lxd/container_delete.go b/lxd/container_delete.go
index a98e6051d..c0226f349 100644
--- a/lxd/container_delete.go
+++ b/lxd/container_delete.go
@@ -25,7 +25,7 @@ func containerDelete(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, rmct, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, rmct, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_exec.go b/lxd/container_exec.go
index f75dc348a..587312406 100644
--- a/lxd/container_exec.go
+++ b/lxd/container_exec.go
@@ -435,7 +435,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 		resources := map[string][]string{}
 		resources["containers"] = []string{ws.container.Name()}
 
-		op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+		op, err := operationCreate(d.cluster, operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -487,7 +487,7 @@ func containerExecPost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_post.go b/lxd/container_post.go
index 25e1fd04b..7c18c5c38 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -62,7 +62,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 				return InternalError(err)
 			}
 
-			op, err := operationCreate(operationClassTask, resources, nil, ws.Do, nil, nil)
+			op, err := operationCreate(d.cluster, operationClassTask, resources, nil, ws.Do, nil, nil)
 			if err != nil {
 				return InternalError(err)
 			}
@@ -71,7 +71,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 		}
 
 		// Pull mode
-		op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+		op, err := operationCreate(d.cluster, operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -92,7 +92,7 @@ func containerPost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_put.go b/lxd/container_put.go
index 02a05c643..93cba1d0b 100644
--- a/lxd/container_put.go
+++ b/lxd/container_put.go
@@ -75,7 +75,7 @@ func containerPut(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, do, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, do, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index 94ca9cbd8..e42b1a6d5 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -120,7 +120,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, snapshot, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, snapshot, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -222,7 +222,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 				return InternalError(err)
 			}
 
-			op, err := operationCreate(operationClassTask, resources, nil, ws.Do, nil, nil)
+			op, err := operationCreate(d.cluster, operationClassTask, resources, nil, ws.Do, nil, nil)
 			if err != nil {
 				return InternalError(err)
 			}
@@ -231,7 +231,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 		}
 
 		// Pull mode
-		op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
+		op, err := operationCreate(d.cluster, operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -259,7 +259,7 @@ func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string
 	resources := map[string][]string{}
 	resources["containers"] = []string{containerName}
 
-	op, err := operationCreate(operationClassTask, resources, nil, rename, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, rename, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -275,7 +275,8 @@ func snapshotDelete(sc container, name string) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{sc.Name()}
 
-	op, err := operationCreate(operationClassTask, resources, nil, remove, nil, nil)
+	state := sc.DaemonState()
+	op, err := operationCreate(state.Cluster, operationClassTask, resources, nil, remove, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/container_state.go b/lxd/container_state.go
index 039b8fdb0..306fbca74 100644
--- a/lxd/container_state.go
+++ b/lxd/container_state.go
@@ -159,7 +159,7 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, do, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, do, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 666a3fbc2..5f33235b8 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -126,7 +126,7 @@ func createFromImage(d *Daemon, req *api.ContainersPost) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -160,7 +160,7 @@ func createFromNone(d *Daemon, req *api.ContainersPost) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -407,12 +407,12 @@ func createFromMigration(d *Daemon, req *api.ContainersPost) Response {
 
 	var op *operation
 	if push {
-		op, err = operationCreate(operationClassWebsocket, resources, sink.Metadata(), run, nil, sink.Connect)
+		op, err = operationCreate(d.cluster, operationClassWebsocket, resources, sink.Metadata(), run, nil, sink.Connect)
 		if err != nil {
 			return InternalError(err)
 		}
 	} else {
-		op, err = operationCreate(operationClassTask, resources, nil, run, nil, nil)
+		op, err = operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 		if err != nil {
 			return InternalError(err)
 		}
@@ -507,7 +507,7 @@ func createFromCopy(d *Daemon, req *api.ContainersPost) Response {
 	resources := map[string][]string{}
 	resources["containers"] = []string{req.Name, req.Source.Source}
 
-	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/daemon_images_test.go b/lxd/daemon_images_test.go
index 68683dde7..cc91165f6 100644
--- a/lxd/daemon_images_test.go
+++ b/lxd/daemon_images_test.go
@@ -39,7 +39,7 @@ func (suite *daemonImagesTestSuite) TestUseCachedImagesIfAvailable() {
 
 	// Request an image with alias "test" and check that it's the
 	// one we created above.
-	op, err := operationCreate(operationClassTask, map[string][]string{}, nil, nil, nil, nil)
+	op, err := operationCreate(suite.d.cluster, operationClassTask, map[string][]string{}, nil, nil, nil, nil)
 	suite.Req.Nil(err)
 	image, err := suite.d.ImageDownload(op, "img.srv", "simplestreams", "", "", "test", false, false, "", true)
 	suite.Req.Nil(err)
diff --git a/lxd/images.go b/lxd/images.go
index 5697637a4..62ebba8a5 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -765,7 +765,7 @@ func imagesPost(d *Daemon, r *http.Request) Response {
 		return nil
 	}
 
-	op, err := operationCreate(operationClassTask, nil, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, nil, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -1237,7 +1237,7 @@ func imageDelete(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["images"] = []string{fingerprint}
 
-	op, err := operationCreate(operationClassTask, resources, nil, rmimg, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, resources, nil, rmimg, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -1671,7 +1671,7 @@ func imageSecret(d *Daemon, r *http.Request) Response {
 	resources := map[string][]string{}
 	resources["images"] = []string{imgInfo.Fingerprint}
 
-	op, err := operationCreate(operationClassToken, resources, meta, nil, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassToken, resources, meta, nil, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
@@ -1691,7 +1691,7 @@ func imageRefresh(d *Daemon, r *http.Request) Response {
 		return autoUpdateImage(d, op, imageId, imageInfo)
 	}
 
-	op, err := operationCreate(operationClassTask, nil, nil, run, nil, nil)
+	op, err := operationCreate(d.cluster, operationClassTask, nil, nil, run, nil, nil)
 	if err != nil {
 		return InternalError(err)
 	}
diff --git a/lxd/migrate.go b/lxd/migrate.go
index e7789fddb..d19886bd8 100644
--- a/lxd/migrate.go
+++ b/lxd/migrate.go
@@ -699,7 +699,9 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 				return abort(err)
 			}
 
+			state := s.container.DaemonState()
 			actionScriptOp, err := operationCreate(
+				state.Cluster,
 				operationClassWebsocket,
 				nil,
 				nil,
@@ -737,7 +739,6 @@ func (s *migrationSourceWs) Do(migrateOp *operation) error {
 				return abort(err)
 			}
 
-			state := s.container.DaemonState()
 			err = writeActionScript(checkpointDir, actionScriptOp.url, actionScriptOpSecret, state.OS.ExecPath)
 			if err != nil {
 				os.RemoveAll(checkpointDir)
diff --git a/lxd/operations.go b/lxd/operations.go
index dc0388fbd..dd428422a 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -10,7 +10,9 @@ import (
 
 	"github.com/gorilla/mux"
 	"github.com/pborman/uuid"
+	"github.com/pkg/errors"
 
+	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -61,6 +63,8 @@ type operation struct {
 
 	// Locking for concurrent access to the operation
 	lock sync.Mutex
+
+	cluster *db.Cluster
 }
 
 func (op *operation) done() {
@@ -87,6 +91,13 @@ func (op *operation) done() {
 		delete(operations, op.id)
 		operationsLock.Unlock()
 
+		err := op.cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.OperationRemove(op.id)
+		})
+		if err != nil {
+			logger.Warnf("Failed to delete operation %s: %s", op.id, err)
+		}
+
 		/*
 		 * When we create a new lxc.Container, it adds a finalizer (via
 		 * SetFinalizer) that frees the struct. However, it sometimes
@@ -372,7 +383,7 @@ func (op *operation) UpdateMetadata(opMetadata interface{}) error {
 	return nil
 }
 
-func operationCreate(opClass operationClass, opResources map[string][]string, opMetadata interface{}, onRun func(*operation) error, onCancel func(*operation) error, onConnect func(*operation, *http.Request, http.ResponseWriter) error) (*operation, error) {
+func operationCreate(cluster *db.Cluster, opClass operationClass, opResources map[string][]string, opMetadata interface{}, onRun func(*operation) error, onCancel func(*operation) error, onConnect func(*operation, *http.Request, http.ResponseWriter) error) (*operation, error) {
 	// Main attributes
 	op := operation{}
 	op.id = uuid.NewRandom().String()
@@ -383,6 +394,7 @@ func operationCreate(opClass operationClass, opResources map[string][]string, op
 	op.url = fmt.Sprintf("/%s/operations/%s", version.APIVersion, op.id)
 	op.resources = opResources
 	op.chanDone = make(chan error)
+	op.cluster = cluster
 
 	newMetadata, err := shared.ParseMetadata(opMetadata)
 	if err != nil {
@@ -416,6 +428,14 @@ func operationCreate(opClass operationClass, opResources map[string][]string, op
 	operations[op.id] = &op
 	operationsLock.Unlock()
 
+	err = op.cluster.Transaction(func(tx *db.ClusterTx) error {
+		_, err := tx.OperationAdd(op.id)
+		return err
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to add operation to database")
+	}
+
 	logger.Debugf("New %s operation: %s", op.class.String(), op.id)
 	_, md, _ := op.Render()
 	eventSend("operation", md)
diff --git a/po/de.po b/po/de.po
index bc095d70e..3e707ae5d 100644
--- a/po/de.po
+++ b/po/de.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: 2017-02-14 17:11+0000\n"
 "Last-Translator: Tim Rose <tim at netlope.de>\n"
 "Language-Team: German <https://hosted.weblate.org/projects/linux-containers/"
@@ -498,7 +498,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -857,7 +857,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "der Name des Ursprung Containers muss angegeben werden"
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -918,7 +918,7 @@ msgstr "Kein Zertifikat für diese Verbindung"
 msgid "No fingerprint specified."
 msgstr "Kein Fingerabdruck angegeben."
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Gerät %s wurde von %s entfernt\n"
@@ -1156,7 +1156,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1432,7 +1432,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/el.po b/po/el.po
index 641184cdc..151cc99c6 100644
--- a/po/el.po
+++ b/po/el.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: 2017-02-14 08:00+0000\n"
 "Last-Translator: Simos Xenitellis <simos.65 at gmail.com>\n"
 "Language-Team: Greek <https://hosted.weblate.org/projects/linux-containers/"
@@ -384,7 +384,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -730,7 +730,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -788,7 +788,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1018,7 +1018,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1282,7 +1282,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/fr.po b/po/fr.po
index 9daeaeea9..cab670c0a 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: 2017-10-26 15:46+0000\n"
 "Last-Translator: Alban Vidal <alban.vidal at zordhak.fr>\n"
 "Language-Team: French <https://hosted.weblate.org/projects/linux-containers/"
@@ -485,7 +485,7 @@ msgstr "Création de %s"
 msgid "Creating the container"
 msgstr "Création du conteneur"
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -843,7 +843,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "Vous devez fournir le nom d'un conteneur pour : "
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr "NOM"
@@ -902,7 +902,7 @@ msgstr "Aucun périphérique existant pour ce réseau"
 msgid "No fingerprint specified."
 msgstr "Aucune empreinte n'a été indiquée."
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Profil %s supprimé de %s"
@@ -1136,7 +1136,7 @@ msgstr "INSTANTANÉS"
 msgid "SOURCE"
 msgstr "SOURCE"
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr "ÉTAT"
 
@@ -1417,7 +1417,7 @@ msgstr "Type : persistant"
 msgid "UPLOAD DATE"
 msgstr "DATE DE PUBLICATION"
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr "URL"
 
diff --git a/po/id.po b/po/id.po
index 3b1ff22db..a32bded7e 100644
--- a/po/id.po
+++ b/po/id.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/it.po b/po/it.po
index 376f7a4f6..5e87787fb 100644
--- a/po/it.po
+++ b/po/it.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: 2017-08-18 14:22+0000\n"
 "Last-Translator: Alberto Donato <alberto.donato at gmail.com>\n"
 "Language-Team: Italian <https://hosted.weblate.org/projects/linux-containers/"
@@ -405,7 +405,7 @@ msgstr "Creazione di %s in corso"
 msgid "Creating the container"
 msgstr "Creazione del container in corso"
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -750,7 +750,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -807,7 +807,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1037,7 +1037,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1301,7 +1301,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/ja.po b/po/ja.po
index dc7298417..b7ef05360 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: 2017-09-28 20:29+0000\n"
 "Last-Translator: KATOH Yasufumi <karma at jazz.email.ne.jp>\n"
 "Language-Team: Japanese <https://hosted.weblate.org/projects/linux-"
@@ -386,7 +386,7 @@ msgstr "%s を作成中"
 msgid "Creating the container"
 msgstr "コンテナを作成中"
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -735,7 +735,7 @@ msgstr "ディレクトリからのインポートは root で実行する必要
 msgid "Must supply container name for: "
 msgstr "コンテナ名を指定する必要があります: "
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -792,7 +792,7 @@ msgstr "このストレージボリュームに対するデバイスがありま
 msgid "No fingerprint specified."
 msgstr "フィンガープリントが指定されていません。"
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "プロファイル %s が %s から削除されました"
@@ -1023,7 +1023,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1301,7 +1301,7 @@ msgstr "タイプ: persistent"
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/lxd.pot b/po/lxd.pot
index 07a8722e8..bd9895e26 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -7,7 +7,7 @@
 msgid   ""
 msgstr  "Project-Id-Version: lxd\n"
         "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-        "POT-Creation-Date: 2017-12-04 08:58+0000\n"
+        "POT-Creation-Date: 2017-12-04 08:59+0000\n"
         "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
         "Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
         "Language-Team: LANGUAGE <LL at li.org>\n"
@@ -372,7 +372,7 @@ msgstr  ""
 msgid   "Creating the container"
 msgstr  ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid   "DATABASE"
 msgstr  ""
 
@@ -715,7 +715,7 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid   "NAME"
 msgstr  ""
 
@@ -771,7 +771,7 @@ msgstr  ""
 msgid   "No fingerprint specified."
 msgstr  ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid   "Node %s removed"
 msgstr  ""
@@ -1000,7 +1000,7 @@ msgstr  ""
 msgid   "SOURCE"
 msgstr  ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid   "STATE"
 msgstr  ""
 
@@ -1260,7 +1260,7 @@ msgstr  ""
 msgid   "UPLOAD DATE"
 msgstr  ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid   "URL"
 msgstr  ""
 
diff --git a/po/nb_NO.po b/po/nb_NO.po
index f2c794bce..59b2bf8cb 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/nl.po b/po/nl.po
index a0a3c907e..190d5cd49 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/pt_BR.po b/po/pt_BR.po
index 05aac9071..30d59c628 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/ru.po b/po/ru.po
index bde82c0cf..da1fea3b2 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: 2017-09-05 16:48+0000\n"
 "Last-Translator: Ilya Yakimavets <ilya.yakimavets at backend.expert>\n"
 "Language-Team: Russian <https://hosted.weblate.org/projects/linux-containers/"
@@ -468,7 +468,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -814,7 +814,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -872,7 +872,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1102,7 +1102,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1366,7 +1366,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/sr.po b/po/sr.po
index 038d6a89f..f57b3c688 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/sv.po b/po/sv.po
index e5d9d18b3..80a2f488a 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/tr.po b/po/tr.po
index 78592d739..29890d47a 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/zh.po b/po/zh.po
index 8e8a67f3b..1ea3f41d9 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index 835c5870b..bd4c8c3a2 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:58+0000\n"
+"POT-Creation-Date: 2017-12-04 08:59+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:162
+#: lxc/cluster.go:157
 msgid "DATABASE"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:160 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,7 +781,7 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:121
+#: lxc/cluster.go:116
 #, c-format
 msgid "Node %s removed"
 msgstr ""
@@ -1011,7 +1011,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:163 lxc/list.go:469
+#: lxc/cluster.go:158 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1275,7 +1275,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:161 lxc/remote.go:410
+#: lxc/cluster.go:156 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 

From 883d5a9e1dcc4c2636f57eb2f5bf239f3f9d75e8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 16 Nov 2017 12:00:22 +0000
Subject: [PATCH 089/116] Add cluster.Events task for watching events from
 other nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/events.go | 104 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)
 create mode 100644 lxd/cluster/events.go

diff --git a/lxd/cluster/events.go b/lxd/cluster/events.go
new file mode 100644
index 000000000..fe02df4f7
--- /dev/null
+++ b/lxd/cluster/events.go
@@ -0,0 +1,104 @@
+package cluster
+
+import (
+	"fmt"
+	"time"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/endpoints"
+	"github.com/lxc/lxd/lxd/task"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logger"
+	"golang.org/x/net/context"
+)
+
+// Events starts a task that continuously monitors the list of cluster nodes and
+// maintains a pool of websocket connections against all of them, in order to
+// get notified about events.
+//
+// Whenever an event is received the given callback is invoked.
+func Events(endpoints *endpoints.Endpoints, cluster *db.Cluster, f func(int64, interface{})) (task.Func, task.Schedule) {
+	listeners := map[int64]*lxd.EventListener{}
+
+	// Update our pool of event listeners.
+	update := func(ctx context.Context) {
+		// Get the current cluster nodes.
+		var nodes []db.NodeInfo
+		err := cluster.Transaction(func(tx *db.ClusterTx) error {
+			var err error
+			nodes, err = tx.Nodes()
+			return err
+		})
+		if err != nil {
+			logger.Warnf("Failed to get current cluster nodes: %v", err)
+			return
+		}
+		if len(nodes) == 1 {
+			return // Either we're not clustered or this is a single-node cluster
+		}
+
+		address := endpoints.NetworkAddress()
+
+		ids := make([]int, len(nodes))
+		for i, node := range nodes {
+			ids[i] = int(node.ID)
+
+			// Don't bother trying to connect to offline nodes, or to ourselves.
+			if node.IsDown() || node.Address == address {
+				continue
+			}
+
+			_, ok := listeners[node.ID]
+
+			// The node already has a listener associated with it.
+			if ok {
+				// Double-check that the listener is still
+				// connected. If it is, just move on; otherwise
+				// we'll try to connect again.
+				if listeners[node.ID].Active() {
+					continue
+				}
+				delete(listeners, node.ID)
+			}
+
+			listener, err := eventsConnect(node.Address, endpoints.NetworkCert())
+			if err != nil {
+				logger.Warnf("Failed to get events from node %s: %v", node.Address, err)
+				continue
+			}
+			logger.Debugf("Listening for events on node %s", node.Address)
+			listener.AddHandler(nil, func(event interface{}) { f(node.ID, event) })
+			listeners[node.ID] = listener
+		}
+		for id, listener := range listeners {
+			if !shared.IntInSlice(int(id), ids) {
+				listener.Disconnect()
+				delete(listeners, id)
+			}
+		}
+	}
+
+	schedule := task.Every(time.Second)
+
+	return update, schedule
+}
+
+// Establish a client connection to get events from the given node.
+func eventsConnect(address string, cert *shared.CertInfo) (*lxd.EventListener, error) {
+	args := &lxd.ConnectionArgs{
+		TLSServerCert: string(cert.PublicKey()),
+		TLSClientCert: string(cert.PublicKey()),
+		TLSClientKey:  string(cert.PrivateKey()),
+		// Use a special user agent to let the events API handler know that
+		// it should only notify us of local events.
+		UserAgent: "lxd-cluster-notifier",
+	}
+
+	url := fmt.Sprintf("https://%s", address)
+	client, err := lxd.ConnectLXD(url, args)
+	if err != nil {
+		return nil, err
+	}
+	return client.GetEvents()
+}

From 91ad8adada25a960751ae18e745609d8edc9cedc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 16 Nov 2017 12:00:55 +0000
Subject: [PATCH 090/116] Make nodes forward events received from other nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/events.go         | 10 ++++++++++
 lxd/cluster/heartbeat.go |  4 ++++
 lxd/daemon.go            |  3 +++
 lxd/events.go            | 34 ++++++++++++++++++++++++++++++++--
 4 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/client/events.go b/client/events.go
index 9738505c1..a05bce83f 100644
--- a/client/events.go
+++ b/client/events.go
@@ -98,3 +98,13 @@ func (e *EventListener) Wait() error {
 	<-e.chActive
 	return e.err
 }
+
+// Active returns true if this listener is still connected, false otherwise.
+func (e *EventListener) Active() bool {
+	select {
+	case <-e.chActive:
+		return false // If the chActive channel is closed we got disconnected
+	default:
+		return true
+	}
+}
diff --git a/lxd/cluster/heartbeat.go b/lxd/cluster/heartbeat.go
index 150bff51b..332f291c3 100644
--- a/lxd/cluster/heartbeat.go
+++ b/lxd/cluster/heartbeat.go
@@ -44,6 +44,10 @@ func Heartbeat(gateway *Gateway, cluster *db.Cluster) (task.Func, task.Schedule)
 			nodes, err = tx.Nodes()
 			return err
 		})
+		if err != nil {
+			logger.Warnf("Failed to get current cluster nodes: %v", err)
+			return
+		}
 		wg := sync.WaitGroup{}
 		wg.Add(len(nodes))
 		heartbeats := make([]time.Time, len(nodes))
diff --git a/lxd/daemon.go b/lxd/daemon.go
index d25a08646..dcdeb02b2 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -519,6 +519,9 @@ func (d *Daemon) Ready() error {
 	/* Heartbeats */
 	d.tasks.Add(cluster.Heartbeat(d.gateway, d.cluster))
 
+	/* Events */
+	d.tasks.Add(cluster.Events(d.endpoints, d.cluster, eventForward))
+
 	// FIXME: There's no hard reason for which we should not run these
 	//        tasks in mock mode. However it requires that we tweak them so
 	//        they exit gracefully without blocking (something we should do
diff --git a/lxd/events.go b/lxd/events.go
index 4d0c1e9b5..c08cac628 100644
--- a/lxd/events.go
+++ b/lxd/events.go
@@ -53,6 +53,11 @@ type eventListener struct {
 	id           string
 	lock         sync.Mutex
 	done         bool
+
+	// If true, this listener won't get events forwarded from other
+	// nodes. It is only used by listeners created internally by LXD nodes
+	// connecting to other LXD nodes to get their local events only.
+	noForward bool
 }
 
 type eventsServe struct {
@@ -85,6 +90,11 @@ func eventsSocket(r *http.Request, w http.ResponseWriter) error {
 	listener.id = uuid.NewRandom().String()
 	listener.messageTypes = strings.Split(typeStr, ",")
 
+	// If this request is an internal one initiated by another node wanting
+	// to watch the events on this node, set the listener to broadcast only
+	// local events.
+	listener.noForward = isClusterNotification(r)
+
 	eventsLock.Lock()
 	eventListeners[listener.id] = &listener
 	eventsLock.Unlock()
@@ -97,7 +107,7 @@ func eventsSocket(r *http.Request, w http.ResponseWriter) error {
 }
 
 func eventsGet(d *Daemon, r *http.Request) Response {
-	return &eventsServe{r}
+	return &eventsServe{req: r}
 }
 
 var eventsCmd = Command{name: "events", get: eventsGet}
@@ -108,15 +118,24 @@ func eventSend(eventType string, eventMessage interface{}) error {
 	event["timestamp"] = time.Now()
 	event["metadata"] = eventMessage
 
+	return eventBroadcast(event)
+}
+
+func eventBroadcast(event shared.Jmap) error {
 	body, err := json.Marshal(event)
 	if err != nil {
 		return err
 	}
 
+	_, isForward := event["node"]
 	eventsLock.Lock()
 	listeners := eventListeners
 	for _, listener := range listeners {
-		if !shared.StringInSlice(eventType, listener.messageTypes) {
+		if isForward && listener.noForward {
+			continue
+		}
+
+		if !shared.StringInSlice(event["type"].(string), listener.messageTypes) {
 			continue
 		}
 
@@ -154,3 +173,14 @@ func eventSend(eventType string, eventMessage interface{}) error {
 
 	return nil
 }
+
+// Forward an event received from another node to the local events dispatcher.
+func eventForward(id int64, data interface{}) {
+	event := data.(map[string]interface{})
+	event["node"] = id
+
+	err := eventBroadcast(event)
+	if err != nil {
+		logger.Warnf("Failed to forward event from node %d: %v", id, err)
+	}
+}

From 79a61f5e9588c79e5661a3231262aa7102e2c9f8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 17 Nov 2017 09:32:41 +0000
Subject: [PATCH 091/116] Change GET /operations/<uuid> to return non-local ops
 on other nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/operations.go | 44 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 37 insertions(+), 7 deletions(-)

diff --git a/lxd/operations.go b/lxd/operations.go
index dd428422a..cdc5deb75 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -12,6 +12,7 @@ import (
 	"github.com/pborman/uuid"
 	"github.com/pkg/errors"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -459,14 +460,43 @@ func operationGet(id string) (*operation, error) {
 func operationAPIGet(d *Daemon, r *http.Request) Response {
 	id := mux.Vars(r)["id"]
 
-	op, err := operationGet(id)
-	if err != nil {
-		return NotFound
-	}
+	var body *api.Operation
 
-	_, body, err := op.Render()
-	if err != nil {
-		return SmartError(err)
+	// First check the local cache, then the cluster database table.
+	op, err := operationGet(id)
+	if err == nil {
+		_, body, err = op.Render()
+		if err != nil {
+			return SmartError(err)
+		}
+	} else {
+		var address string
+		err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			operation, err := tx.OperationByUUID(id)
+			if err != nil {
+				return err
+			}
+			address = operation.NodeAddress
+			return nil
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+		cert := d.endpoints.NetworkCert()
+		args := &lxd.ConnectionArgs{
+			TLSServerCert: string(cert.PublicKey()),
+			TLSClientCert: string(cert.PublicKey()),
+			TLSClientKey:  string(cert.PrivateKey()),
+		}
+		url := fmt.Sprintf("https://%s", address)
+		client, err := lxd.ConnectLXD(url, args)
+		if err != nil {
+			return SmartError(err)
+		}
+		body, _, err = client.GetOperation(id)
+		if err != nil {
+			return SmartError(err)
+		}
 	}
 
 	return SyncResponse(true, body)

From 189221f26251fb0aa1f11c23780e167b33c25fc3 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Fri, 17 Nov 2017 11:59:45 +0000
Subject: [PATCH 092/116] Support for lxd init --target <node>

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |  1 +
 client/lxd.go             |  3 +++
 client/lxd_containers.go  |  6 +++++-
 client/lxd_server.go      | 18 ++++++++++++++++++
 doc/api-extensions.md     |  4 ++++
 lxc/init.go               |  5 ++++-
 lxd/containers_post.go    | 38 ++++++++++++++++++++++++++++++++++++++
 lxd/response.go           | 37 +++++++++++++++++++++++++++++++++++++
 test/suites/clustering.sh | 36 ++++++++++++++++++++++++++++++++++++
 9 files changed, 146 insertions(+), 2 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 4ab4a8843..a97ad3750 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -47,6 +47,7 @@ type ContainerServer interface {
 	UpdateServer(server api.ServerPut, ETag string) (err error)
 	HasExtension(extension string) (exists bool)
 	RequireAuthenticated(authenticated bool)
+	ClusterTargetNode(name string) ContainerServer
 
 	// Certificate functions
 	GetCertificateFingerprints() (fingerprints []string, err error)
diff --git a/client/lxd.go b/client/lxd.go
index 9e0be5200..4421258eb 100644
--- a/client/lxd.go
+++ b/client/lxd.go
@@ -35,6 +35,9 @@ type ProtocolLXD struct {
 	bakeryClient         *httpbakery.Client
 	bakeryInteractor     httpbakery.Interactor
 	requireAuthenticated bool
+
+	// Name of the node that node-specific operations will target.
+	targetNode string
 }
 
 // GetConnectionInfo returns the basic connection information used to interact with the server
diff --git a/client/lxd_containers.go b/client/lxd_containers.go
index 04585e135..ba3f52c52 100644
--- a/client/lxd_containers.go
+++ b/client/lxd_containers.go
@@ -71,7 +71,11 @@ func (r *ProtocolLXD) CreateContainer(container api.ContainersPost) (*Operation,
 	}
 
 	// Send the request
-	op, _, err := r.queryOperation("POST", "/containers", container, "")
+	path := "/containers"
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	op, _, err := r.queryOperation("POST", path, container, "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/client/lxd_server.go b/client/lxd_server.go
index c95d095de..84b401deb 100644
--- a/client/lxd_server.go
+++ b/client/lxd_server.go
@@ -77,3 +77,21 @@ func (r *ProtocolLXD) GetServerResources() (*api.Resources, error) {
 
 	return &resources, nil
 }
+
+// ClusterTargetNode returns a client that will target the given node for
+// node-specific operations such as creating containers, modifying storage
+// configuration etc.
+func (r *ProtocolLXD) ClusterTargetNode(name string) ContainerServer {
+	return &ProtocolLXD{
+		server:               r.server,
+		http:                 r.http,
+		httpCertificate:      r.httpCertificate,
+		httpHost:             r.httpHost,
+		httpProtocol:         r.httpProtocol,
+		httpUserAgent:        r.httpUserAgent,
+		bakeryClient:         r.bakeryClient,
+		bakeryInteractor:     r.bakeryInteractor,
+		requireAuthenticated: r.requireAuthenticated,
+		targetNode:           name,
+	}
+}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 49744fb05..35a89addc 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -384,3 +384,7 @@ This includes the following new endpoints:
 
 * `GET /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
 * `DELETE /1.0/cluster/nodes/<name>` (see [RESTful API](rest-api.md) for details)
+
+The following existing endpoints have been modified:
+
+ * `POST /1.0/containers` accepts a new targetNode query parameter
diff --git a/lxc/init.go b/lxc/init.go
index caa3ce84a..7e5ed14aa 100644
--- a/lxc/init.go
+++ b/lxc/init.go
@@ -67,6 +67,7 @@ type initCmd struct {
 	network      string
 	storagePool  string
 	instanceType string
+	target       string
 }
 
 func (c *initCmd) showByDefault() bool {
@@ -75,7 +76,7 @@ func (c *initCmd) showByDefault() bool {
 
 func (c *initCmd) usage() string {
 	return i18n.G(
-		`Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>]
+		`Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target <node>]
 
 Create containers from images.
 
@@ -145,6 +146,7 @@ func (c *initCmd) flags() {
 	gnuflag.StringVar(&c.storagePool, "storage", "", i18n.G("Storage pool name"))
 	gnuflag.StringVar(&c.storagePool, "s", "", i18n.G("Storage pool name"))
 	gnuflag.StringVar(&c.instanceType, "t", "", i18n.G("Instance type"))
+	gnuflag.StringVar(&c.target, "target", "", i18n.G("Node name"))
 }
 
 func (c *initCmd) run(conf *config.Config, args []string) error {
@@ -180,6 +182,7 @@ func (c *initCmd) create(conf *config.Config, args []string) (lxd.ContainerServe
 	if err != nil {
 		return nil, "", err
 	}
+	d = d.ClusterTargetNode(c.target)
 
 	/*
 	 * initRequestedEmptyProfiles means user requested empty
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 5f33235b8..66053f7ef 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -11,6 +11,7 @@ import (
 	"github.com/dustinkirkland/golang-petname"
 	"github.com/gorilla/websocket"
 
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/types"
@@ -523,6 +524,43 @@ func containersPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(err)
 	}
 
+	targetNode := r.FormValue("targetNode")
+	if targetNode != "" {
+		address := ""
+		err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+			node, err := tx.NodeByName(targetNode)
+			if err != nil {
+				return err
+			}
+			if node.Address != d.endpoints.NetworkAddress() {
+				address = node.Address
+			}
+			return nil
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+		if address != "" {
+			cert := d.endpoints.NetworkCert()
+			args := &lxd.ConnectionArgs{
+				TLSServerCert: string(cert.PublicKey()),
+				TLSClientCert: string(cert.PublicKey()),
+				TLSClientKey:  string(cert.PrivateKey()),
+			}
+			url := fmt.Sprintf("https://%s", address)
+			client, err := lxd.ConnectLXD(url, args)
+			if err != nil {
+				return SmartError(err)
+			}
+			logger.Debugf("Forward container post request to %s", address)
+			op, err := client.CreateContainer(req)
+			if err != nil {
+				return SmartError(err)
+			}
+			return ForwardedOperationResponse(&op.Operation)
+		}
+	}
+
 	// If no storage pool is found, error out.
 	pools, err := d.cluster.StoragePools()
 	if err != nil || len(pools) == 0 {
diff --git a/lxd/response.go b/lxd/response.go
index 73d8540c1..9fb053412 100644
--- a/lxd/response.go
+++ b/lxd/response.go
@@ -17,6 +17,7 @@ import (
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
+	"github.com/lxc/lxd/shared/version"
 )
 
 type Response interface {
@@ -263,6 +264,42 @@ func OperationResponse(op *operation) Response {
 	return &operationResponse{op}
 }
 
+// Forwarded operation response.
+//
+// Returned when the operation has been created on another node
+type forwardedOperationResponse struct {
+	op *api.Operation
+}
+
+func (r *forwardedOperationResponse) Render(w http.ResponseWriter) error {
+	url := fmt.Sprintf("/%s/operations/%s", version.APIVersion, r.op.ID)
+
+	body := api.ResponseRaw{
+		Response: api.Response{
+			Type:       api.AsyncResponse,
+			Status:     api.OperationCreated.String(),
+			StatusCode: int(api.OperationCreated),
+			Operation:  url,
+		},
+		Metadata: r.op,
+	}
+
+	w.Header().Set("Location", url)
+	w.WriteHeader(202)
+
+	return util.WriteJSON(w, body, debug)
+}
+
+func (r *forwardedOperationResponse) String() string {
+	return r.op.ID
+}
+
+// ForwardedOperationResponse creates a response that forwards the metadata of
+// an operation created on another node.
+func ForwardedOperationResponse(op *api.Operation) Response {
+	return &forwardedOperationResponse{op}
+}
+
 // Error response
 type errorResponse struct {
 	code int
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 108a80e16..05535f9b6 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -96,3 +96,39 @@ test_clustering() {
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
 }
+
+test_clustering_containers() {
+  setup_clustering_bridge
+  prefix="lxd$$"
+  bridge="${prefix}"
+
+  setup_clustering_netns 1
+  LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_ONE_DIR}"
+  ns1="${prefix}1"
+  spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}"
+
+  # Add a newline at the end of each line. YAML has weird rules.
+  cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
+
+  # Spawn a second node
+  setup_clustering_netns 2
+  LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_TWO_DIR}"
+  ns2="${prefix}2"
+  spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
+
+  # Init a container on node2, using a client connected to node1
+  LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
+  LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 testimage foo
+  LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo
+
+  LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
+
+  LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
+  sleep 2
+  rm -f "${LXD_TWO_DIR}/unix.socket"
+  rm -f "${LXD_ONE_DIR}/unix.socket"
+}
+

From eba676d9642737848599bad1bac9d9511fff2f63 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 20 Nov 2017 08:02:57 +0000
Subject: [PATCH 093/116] Include node name in GET /containers/<name> and lxc
 info <name>

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/info.go               |   3 ++
 lxd/container_lxc.go      |   5 +++
 lxd/db/containers.go      |  20 +++++++--
 po/de.po                  | 102 +++++++++++++++++++++++++--------------------
 po/el.po                  | 102 +++++++++++++++++++++++++--------------------
 po/fr.po                  | 103 +++++++++++++++++++++++++--------------------
 po/id.po                  | 102 +++++++++++++++++++++++++--------------------
 po/it.po                  | 102 +++++++++++++++++++++++++--------------------
 po/ja.po                  | 104 ++++++++++++++++++++++++++--------------------
 po/lxd.pot                | 101 ++++++++++++++++++++++++--------------------
 po/nb_NO.po               | 102 +++++++++++++++++++++++++--------------------
 po/nl.po                  | 102 +++++++++++++++++++++++++--------------------
 po/pt_BR.po               | 102 +++++++++++++++++++++++++--------------------
 po/ru.po                  | 102 +++++++++++++++++++++++++--------------------
 po/sr.po                  | 102 +++++++++++++++++++++++++--------------------
 po/sv.po                  | 102 +++++++++++++++++++++++++--------------------
 po/tr.po                  | 102 +++++++++++++++++++++++++--------------------
 po/zh.po                  | 102 +++++++++++++++++++++++++--------------------
 po/zh_Hans.po             | 102 +++++++++++++++++++++++++--------------------
 shared/api/container.go   |   3 ++
 test/suites/clustering.sh |   1 +
 21 files changed, 926 insertions(+), 740 deletions(-)

diff --git a/lxc/info.go b/lxc/info.go
index fdb9f250d..7b89e1866 100644
--- a/lxc/info.go
+++ b/lxc/info.go
@@ -115,6 +115,9 @@ func (c *infoCmd) containerInfo(d lxd.ContainerServer, remote config.Remote, nam
 	const layout = "2006/01/02 15:04 UTC"
 
 	fmt.Printf(i18n.G("Name: %s")+"\n", ct.Name)
+	if ct.Node != "" {
+		fmt.Printf(i18n.G("Node: %s")+"\n", ct.Node)
+	}
 	if remote.Addr != "" {
 		fmt.Printf(i18n.G("Remote: %s")+"\n", remote.Addr)
 	}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 264ba1d95..6eef2829e 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -453,6 +453,7 @@ func containerLXCLoad(s *state.State, args db.ContainerArgs) (container, error)
 		localConfig:  args.Config,
 		localDevices: args.Devices,
 		stateful:     args.Stateful,
+		node:         args.Node,
 	}
 
 	// Load the config.
@@ -494,6 +495,9 @@ type containerLXC struct {
 
 	// Storage
 	storage storage
+
+	// Clustering
+	node string
 }
 
 func (c *containerLXC) createOperation(action string, reusable bool, reuse bool) (*lxcContainerOperation, error) {
@@ -2823,6 +2827,7 @@ func (c *containerLXC) Render() (interface{}, interface{}, error) {
 			Name:            c.name,
 			Status:          statusCode.String(),
 			StatusCode:      statusCode,
+			Node:            c.node,
 		}
 
 		ct.Description = c.Description()
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 1cd19a33f..c2ef39d0b 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -16,7 +16,8 @@ import (
 // container.
 type ContainerArgs struct {
 	// Don't set manually
-	Id int
+	Id   int
+	Node string
 
 	Description  string
 	Architecture int
@@ -73,7 +74,8 @@ func (c *Cluster) ContainerId(name string) (int, error) {
 }
 
 func (c *Cluster) ContainerGet(name string) (ContainerArgs, error) {
-	var used *time.Time // Hold the db-returned time
+	var used *time.Time    // Hold the db-returned time
+	var nodeAddress string // Hold the db-returned node address
 	description := sql.NullString{}
 
 	args := ContainerArgs{}
@@ -81,9 +83,14 @@ func (c *Cluster) ContainerGet(name string) (ContainerArgs, error) {
 
 	ephemInt := -1
 	statefulInt := -1
-	q := "SELECT id, description, architecture, type, ephemeral, stateful, creation_date, last_use_date FROM containers WHERE name=?"
+	q := `
+SELECT containers.id, containers.description, architecture, type, ephemeral, stateful,
+       creation_date, last_use_date, nodes.name, nodes.address
+  FROM containers JOIN nodes ON node_id = nodes.id
+  WHERE containers.name=?
+`
 	arg1 := []interface{}{name}
-	arg2 := []interface{}{&args.Id, &description, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate, &used}
+	arg2 := []interface{}{&args.Id, &description, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate, &used, &args.Node, &nodeAddress}
 	err := dbQueryRowScan(c.db, q, arg1, arg2)
 	if err != nil {
 		return args, err
@@ -132,6 +139,11 @@ func (c *Cluster) ContainerGet(name string) (ContainerArgs, error) {
 		args.Devices[k] = v
 	}
 
+	if nodeAddress == "0.0.0.0" {
+		// This means we're not clustered, so omit the node name
+		args.Node = ""
+	}
+
 	return args, nil
 }
 
diff --git a/po/de.po b/po/de.po
index 3e707ae5d..b8c223372 100644
--- a/po/de.po
+++ b/po/de.po
@@ -317,7 +317,7 @@ msgstr "Administrator Passwort für %s: "
 msgid "Aliases:"
 msgstr "Aliasse:\n"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, fuzzy, c-format
 msgid "Architecture: %s"
 msgstr "Architektur: %s\n"
@@ -337,11 +337,11 @@ msgstr "automatisches Update: %s"
 msgid "Bad property: %s"
 msgstr "Ungültige Abbild Eigenschaft: %s\n"
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "Bytes empfangen"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "Bytes gesendet"
 
@@ -353,11 +353,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 #, fuzzy
 msgid "CPU usage:"
 msgstr " Prozessorauslastung:"
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 #, fuzzy
 msgid "Config key/value to apply to the new container"
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -447,7 +447,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -483,17 +483,17 @@ msgstr "Kann Verzeichnis für Zertifikate auf dem Server nicht erstellen"
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 #, fuzzy
 msgid "Creating the container"
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -542,7 +542,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 #, fuzzy
 msgid "Disk usage:"
 msgstr " Prozessorauslastung:"
@@ -573,7 +573,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr "Flüchtiger Container"
 
@@ -738,7 +738,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -776,7 +776,7 @@ msgstr "Ungültige Quelle %s"
 msgid "Invalid target %s"
 msgstr "Ungültiges Ziel %s"
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -801,7 +801,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -818,15 +818,15 @@ msgstr "Veröffentliche Abbild"
 msgid "Make the image public"
 msgstr "Veröffentliche Abbild"
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -886,11 +886,11 @@ msgstr "Profil %s gelöscht\n"
 msgid "Network %s renamed to %s"
 msgstr "Profil %s erstellt\n"
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 #, fuzzy
 msgid "Network usage:"
 msgstr "Profil %s erstellt\n"
@@ -923,6 +923,15 @@ msgstr "Kein Fingerabdruck angegeben."
 msgid "Node %s removed"
 msgstr "Gerät %s wurde von %s entfernt\n"
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "automatisches Update: %s"
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -972,11 +981,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -999,7 +1008,7 @@ msgstr "kann nicht zum selben Container Namen kopieren"
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -1029,7 +1038,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, fuzzy, c-format
 msgid "Processes: %d"
 msgstr "Profil %s erstellt\n"
@@ -1059,7 +1068,7 @@ msgstr "Gerät %s wurde von %s entfernt\n"
 msgid "Profile %s renamed to %s"
 msgstr "Profil %s wurde auf %s angewandt\n"
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 #, fuzzy
 msgid "Profile to apply to the new container"
 msgstr "kann nicht zum selben Container Namen kopieren"
@@ -1069,7 +1078,7 @@ msgstr "kann nicht zum selben Container Namen kopieren"
 msgid "Profiles %s applied to %s"
 msgstr "Profil %s wurde auf %s angewandt\n"
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, fuzzy, c-format
 msgid "Profiles: %s"
 msgstr "Profil %s erstellt\n"
@@ -1106,7 +1115,7 @@ msgstr "Entferntes Administrator Passwort"
 msgid "Remote operation canceled by user"
 msgstr "Server Zertifikat vom Benutzer nicht akzeptiert"
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -1125,7 +1134,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -1139,7 +1148,7 @@ msgstr "kann nicht zum selben Container Namen kopieren"
 msgid "Retrieve the container's console log"
 msgstr "Herunterfahren des Containers erzwingen."
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1231,7 +1240,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr "Größe: %.2vMB\n"
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1254,7 +1263,7 @@ msgstr "kann nicht zum selben Container Namen kopieren"
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1287,7 +1296,7 @@ msgstr "Profil %s erstellt\n"
 msgid "Storage pool %s deleted"
 msgstr "Profil %s gelöscht\n"
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 #, fuzzy
 msgid "Storage pool name"
 msgstr "Profilname kann nicht geändert werden"
@@ -1307,11 +1316,11 @@ msgstr "Profil %s gelöscht\n"
 msgid "Store the container state (only for stop)"
 msgstr "Herunterfahren des Containers erzwingen."
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1329,7 +1338,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1344,7 +1353,7 @@ msgstr "entfernte Instanz %s existiert bereits"
 msgid "The device doesn't exist"
 msgstr "entfernte Instanz %s existiert nicht"
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1385,11 +1394,11 @@ msgstr "Wartezeit bevor der Container gestoppt wird."
 msgid "Timestamps:"
 msgstr "Zeitstempel:\n"
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1420,11 +1429,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1859,12 +1868,13 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 #, fuzzy
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2536,7 +2546,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2596,15 +2606,15 @@ msgstr "entfernte Instanz %s existiert als <%s>"
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/el.po b/po/el.po
index 151cc99c6..4282e98fd 100644
--- a/po/el.po
+++ b/po/el.po
@@ -211,7 +211,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -231,11 +231,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -247,11 +247,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 #, fuzzy
 msgid "CPU usage:"
 msgstr "  Χρήση CPU:"
@@ -313,7 +313,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -335,7 +335,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -370,17 +370,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 #, fuzzy
 msgid "Disk usage:"
 msgstr "  Χρήση CPU:"
@@ -457,7 +457,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -615,7 +615,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -652,7 +652,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -677,7 +677,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -693,15 +693,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 #, fuzzy
 msgid "Memory usage:"
 msgstr "  Χρήση μνήμης:"
@@ -759,11 +759,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 #, fuzzy
 msgid "Network usage:"
 msgstr "  Χρήση δικτύου:"
@@ -793,6 +793,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -842,11 +851,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -866,7 +875,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -896,7 +905,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -926,7 +935,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -935,7 +944,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -970,7 +979,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -989,7 +998,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -1001,7 +1010,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1091,7 +1100,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1113,7 +1122,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1145,7 +1154,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1163,11 +1172,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1185,7 +1194,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1198,7 +1207,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1235,11 +1244,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1270,11 +1279,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1648,11 +1657,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2218,7 +2228,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2277,15 +2287,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/fr.po b/po/fr.po
index cab670c0a..eca915eed 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -308,7 +308,7 @@ msgstr "Mot de passe administrateur pour %s : "
 msgid "Aliases:"
 msgstr "Alias :"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr "Architecture : %s"
@@ -328,11 +328,11 @@ msgstr "Mise à jour auto. : %s"
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "Octets reçus"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "Octets émis"
 
@@ -344,11 +344,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr "COMMON NAME"
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr "CPU utilisé (en secondes)"
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr "CPU utilisé :"
 
@@ -413,7 +413,7 @@ msgstr "Colonnes"
 msgid "Commands:"
 msgstr "Commandes :"
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr "Clé/valeur de configuration à appliquer au nouveau conteneur"
 
@@ -435,7 +435,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr "Le nom du conteneur est obligatoire"
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr "Le nom du conteneur est : %s"
@@ -471,17 +471,17 @@ msgstr "Impossible de créer le dossier de stockage des certificats serveurs"
 msgid "Create any directories necessary"
 msgstr "Créer tous répertoires nécessaires"
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr "Créé : %s"
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr "Création de %s"
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr "Création du conteneur"
 
@@ -529,7 +529,7 @@ msgstr "Désactiver l'allocation pseudo-terminal"
 msgid "Disable stdin (reads from /dev/null)"
 msgstr "Désactiver stdin (lecture à partir de /dev/null)"
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 #, fuzzy
 msgid "Disk usage:"
 msgstr "  Disque utilisé :"
@@ -558,7 +558,7 @@ msgstr "Variable d'environnement (de la forme HOME=/home/foo) à positionner"
 msgid "Environment:"
 msgstr "Environnement :"
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr "Conteneur éphémère"
 
@@ -726,7 +726,7 @@ msgstr "Image copiée avec succès !"
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -763,7 +763,7 @@ msgstr "Source invalide %s"
 msgid "Invalid target %s"
 msgstr "Cible invalide %s"
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr "IPs :"
 
@@ -788,7 +788,7 @@ msgstr "Dernière utilisation : %s"
 msgid "Last used: never"
 msgstr "Dernière utilisation : jamais"
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr "Journal : "
 
@@ -804,15 +804,15 @@ msgstr "Rendre l'image publique"
 msgid "Make the image public"
 msgstr "Rendre l'image publique"
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr "Mémoire (courante)"
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr "Mémoire (pointe)"
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 #, fuzzy
 msgid "Memory usage:"
 msgstr "  Mémoire utilisée :"
@@ -872,11 +872,11 @@ msgstr "Le réseau %s a été supprimé"
 msgid "Network %s renamed to %s"
 msgstr "Le réseau %s a été créé"
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr "Nom du réseau"
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 #, fuzzy
 msgid "Network usage:"
 msgstr "  Réseau utilisé :"
@@ -907,6 +907,16 @@ msgstr "Aucune empreinte n'a été indiquée."
 msgid "Node %s removed"
 msgstr "Profil %s supprimé de %s"
 
+#: lxc/init.go:149
+#, fuzzy
+msgid "Node name"
+msgstr "Nom du réseau"
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "Nom : %s"
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr "Seul les volumes \"personnalisés\" peuvent être attaché aux conteneurs"
@@ -956,11 +966,11 @@ msgstr "PROTOCOLE"
 msgid "PUBLIC"
 msgstr "PUBLIC"
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr "Paquets reçus"
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr "Paquets émis"
 
@@ -981,7 +991,7 @@ msgstr "Création du conteneur"
 msgid "Permission denied, are you in the lxd group?"
 msgstr "Permission refusée, êtes-vous dans le groupe lxd ?"
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr "Pid : %d"
@@ -1011,7 +1021,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr "Afficher des informations supplémentaires"
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr "Processus : %d"
@@ -1041,7 +1051,7 @@ msgstr "Profil %s supprimé de %s"
 msgid "Profile %s renamed to %s"
 msgstr "Profil %s ajouté à %s"
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr "Profil à appliquer au nouveau conteneur"
 
@@ -1050,7 +1060,7 @@ msgstr "Profil à appliquer au nouveau conteneur"
 msgid "Profiles %s applied to %s"
 msgstr "Profils %s appliqués à %s"
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr "Profils : %s"
@@ -1086,7 +1096,7 @@ msgstr "Mot de passe de l'administrateur distant"
 msgid "Remote operation canceled by user"
 msgstr "Certificat serveur rejeté par l'utilisateur"
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr "Serveur distant : %s"
@@ -1105,7 +1115,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr "Requérir une confirmation de l'utilisateur"
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr "Ressources :"
 
@@ -1119,7 +1129,7 @@ msgstr "Création du conteneur"
 msgid "Retrieve the container's console log"
 msgstr "Forcer l'arrêt du conteneur (seulement pour stop)"
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr "Récupération de l'image : %s"
@@ -1212,7 +1222,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr "Taille : %.2f Mo"
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr "Instantanés :"
 
@@ -1235,7 +1245,7 @@ msgstr "Création du conteneur"
 msgid "Starting %s"
 msgstr "Démarrage de %s"
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr "État : %s"
@@ -1268,7 +1278,7 @@ msgstr "Le réseau %s a été créé"
 msgid "Storage pool %s deleted"
 msgstr "Le réseau %s a été supprimé"
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr "Nom de l'ensemble de stockage"
 
@@ -1286,11 +1296,11 @@ msgstr "Profil %s supprimé"
 msgid "Store the container state (only for stop)"
 msgstr "Forcer l'arrêt du conteneur (seulement pour stop)"
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr "Swap (courant)"
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr "Swap (pointe)"
 
@@ -1311,7 +1321,7 @@ msgstr ""
 "Le conteneur est en cours d'exécution. Utiliser --force pour qu'il soit "
 "arrêté et redémarré."
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 "Le conteneur que vous démarrez n'est attaché à aucune interface réseau."
@@ -1326,7 +1336,7 @@ msgstr "Le périphérique n'existe pas"
 msgid "The device doesn't exist"
 msgstr "Le périphérique n'existe pas"
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr "L'image locale '%s' n'a pas été trouvée, essayer '%s:' à la place."
@@ -1369,11 +1379,11 @@ msgstr "Temps d'attente du conteneur avant de le tuer"
 msgid "Timestamps:"
 msgstr "Horodatage :"
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr "Pour attacher un réseau à un conteneur, utiliser : lxc network attach"
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr "Pour créer un réseau, utiliser : lxc network create"
 
@@ -1405,11 +1415,11 @@ msgstr "Transfert de l'image : %s"
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr "Essayer `lxc info --show-log %s` pour plus d'informations"
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr "Type : éphémère"
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr "Type : persistant"
 
@@ -1977,12 +1987,13 @@ msgstr ""
 "lxc info [<serveur distant>:]\n"
 "    Pour l'information du serveur LXD."
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 #, fuzzy
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2820,7 +2831,7 @@ msgstr "impossible de spécifier uid/gid/mode en mode récursif"
 msgid "default"
 msgstr "par défaut"
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr "pas d'image, conteneur ou instantané affecté sur ce serveur"
 
@@ -2879,15 +2890,15 @@ msgstr "le serveur distant %s existe en tant que <%s>"
 msgid "remote %s is static and cannot be modified"
 msgstr "le serveur distant %s est statique et ne peut être modifié"
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr "à suivi d'état"
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr "sans suivi d'état"
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr "pris à %s"
diff --git a/po/id.po b/po/id.po
index a32bded7e..47bd4546a 100644
--- a/po/id.po
+++ b/po/id.po
@@ -208,7 +208,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -309,7 +309,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,17 +366,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
@@ -452,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -610,7 +610,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -647,7 +647,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -672,7 +672,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -688,15 +688,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -753,11 +753,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -786,6 +786,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -835,11 +844,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -859,7 +868,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -889,7 +898,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +928,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +937,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -963,7 +972,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -982,7 +991,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -994,7 +1003,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1106,7 +1115,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1138,7 +1147,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1156,11 +1165,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1178,7 +1187,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1191,7 +1200,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1228,11 +1237,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1263,11 +1272,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1641,11 +1650,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2211,7 +2221,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2270,15 +2280,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/it.po b/po/it.po
index 5e87787fb..41c6d6bd4 100644
--- a/po/it.po
+++ b/po/it.po
@@ -232,7 +232,7 @@ msgstr "Password amministratore per %s: "
 msgid "Aliases:"
 msgstr "Alias:"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr "Architettura: %s"
@@ -252,11 +252,11 @@ msgstr "Aggiornamento automatico: %s"
 msgid "Bad property: %s"
 msgstr "Proprietà errata: %s"
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "Bytes ricevuti"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "Byte inviati"
 
@@ -268,11 +268,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr "NOME COMUNE"
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr "Utilizzo CPU (in secondi)"
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr "Utilizzo CPU:"
 
@@ -334,7 +334,7 @@ msgstr "Colonne"
 msgid "Commands:"
 msgstr "Comandi:"
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -356,7 +356,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr "Il nome del container è: %s"
@@ -391,17 +391,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr "Creazione di %s in corso"
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr "Creazione del container in corso"
 
@@ -449,7 +449,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr "Utilizzo disco:"
 
@@ -477,7 +477,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -636,7 +636,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -673,7 +673,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -698,7 +698,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -714,15 +714,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -779,11 +779,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -812,6 +812,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "Aggiornamento automatico: %s"
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -861,11 +870,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -885,7 +894,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -915,7 +924,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -945,7 +954,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -954,7 +963,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -989,7 +998,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -1008,7 +1017,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -1020,7 +1029,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1110,7 +1119,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1132,7 +1141,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1164,7 +1173,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1182,11 +1191,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1204,7 +1213,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1217,7 +1226,7 @@ msgstr "La periferica esiste già"
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1254,11 +1263,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1289,11 +1298,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1667,11 +1676,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2237,7 +2247,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2296,15 +2306,15 @@ msgstr "il remote %s esiste come %s"
 msgid "remote %s is static and cannot be modified"
 msgstr "il remote %s è statico e non può essere modificato"
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr "senza stato"
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr "salvato alle %s"
diff --git a/po/ja.po b/po/ja.po
index b7ef05360..73b4b86ec 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -212,7 +212,7 @@ msgstr "%s の管理者パスワード: "
 msgid "Aliases:"
 msgstr "エイリアス:"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr "アーキテクチャ: %s"
@@ -232,11 +232,11 @@ msgstr "自動更新: %s"
 msgid "Bad property: %s"
 msgstr "不正なイメージプロパティ形式: %s"
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "受信バイト数"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "送信バイト数"
 
@@ -248,11 +248,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr "CPU使用量(秒)"
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr "CPU使用量:"
 
@@ -315,7 +315,7 @@ msgstr "カラムレイアウト"
 msgid "Commands:"
 msgstr "コマンド:"
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr "新しいコンテナに適用するキー/値の設定"
 
@@ -337,7 +337,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr "コンテナ名を指定する必要があります"
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr "コンテナ名: %s"
@@ -372,17 +372,17 @@ msgstr "サーバ証明書格納用のディレクトリを作成できません
 msgid "Create any directories necessary"
 msgstr "必要なディレクトリをすべて作成します"
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr "作成日時: %s"
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr "%s を作成中"
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr "コンテナを作成中"
 
@@ -430,7 +430,7 @@ msgstr "擬似端末の割り当てを無効にします"
 msgid "Disable stdin (reads from /dev/null)"
 msgstr "標準入力を無効にします (/dev/null から読み込みます)"
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr "ディスク使用量:"
 
@@ -458,7 +458,7 @@ msgstr "環境変数を設定します (例: HOME=/home/foo)"
 msgid "Environment:"
 msgstr "環境変数:"
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr "Ephemeral コンテナ"
 
@@ -619,7 +619,7 @@ msgstr "イメージの更新が成功しました!"
 msgid "Input data"
 msgstr "入力するデータ"
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr "インスタンスタイプ"
 
@@ -656,7 +656,7 @@ msgstr "不正なソース %s"
 msgid "Invalid target %s"
 msgstr "不正な送り先 %s"
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr "IPアドレス:"
 
@@ -681,7 +681,7 @@ msgstr "最終使用: %s"
 msgid "Last used: never"
 msgstr "最終使用: 未使用"
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr "ログ:"
 
@@ -697,15 +697,15 @@ msgstr "イメージを public にする"
 msgid "Make the image public"
 msgstr "イメージを public にする"
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr "メモリ (現在値)"
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr "メモリ (ピーク)"
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr "メモリ消費量:"
 
@@ -764,11 +764,11 @@ msgstr "ネットワーク %s を削除しました"
 msgid "Network %s renamed to %s"
 msgstr "ネットワーク %s を作成しました"
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr "ネットワーク名:"
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr "ネットワーク使用状況:"
 
@@ -797,6 +797,16 @@ msgstr "フィンガープリントが指定されていません。"
 msgid "Node %s removed"
 msgstr "プロファイル %s が %s から削除されました"
 
+#: lxc/init.go:149
+#, fuzzy
+msgid "Node name"
+msgstr "ネットワーク名:"
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "コンテナ名: %s"
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr "\"カスタム\" のボリュームのみがコンテナにアタッチできます。"
@@ -846,11 +856,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr "受信パケット"
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr "送信パケット"
 
@@ -870,7 +880,7 @@ msgstr "コンテナを一時停止します。"
 msgid "Permission denied, are you in the lxd group?"
 msgstr "アクセスが拒否されました。lxd グループに所属していますか?"
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr "Pid: %d"
@@ -900,7 +910,7 @@ msgstr "レスポンスをそのまま表示します"
 msgid "Print verbose information"
 msgstr "詳細情報を表示します"
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr "プロセス数: %d"
@@ -930,7 +940,7 @@ msgstr "プロファイル %s が %s から削除されました"
 msgid "Profile %s renamed to %s"
 msgstr "プロファイル %s が %s に追加されました"
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr "新しいコンテナに適用するプロファイル"
 
@@ -939,7 +949,7 @@ msgstr "新しいコンテナに適用するプロファイル"
 msgid "Profiles %s applied to %s"
 msgstr "プロファイル %s が %s に追加されました"
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr "プロファイル: %s"
@@ -974,7 +984,7 @@ msgstr "リモートの管理者パスワード"
 msgid "Remote operation canceled by user"
 msgstr "リモート操作がユーザによってキャンセルされました"
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr "リモート名: %s"
@@ -993,7 +1003,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr "ユーザの確認を要求する"
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr "リソース:"
 
@@ -1006,7 +1016,7 @@ msgstr "コンテナを再起動します。"
 msgid "Retrieve the container's console log"
 msgstr "コンテナの状態を保存します (stopのみ)"
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr "イメージの取得中: %s"
@@ -1097,7 +1107,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr "サイズ: %.2fMB"
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr "スナップショット:"
 
@@ -1119,7 +1129,7 @@ msgstr "コンテナを起動します。"
 msgid "Starting %s"
 msgstr "%s を起動中"
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr "状態: %s"
@@ -1151,7 +1161,7 @@ msgstr "ストレージプール %s を作成しました"
 msgid "Storage pool %s deleted"
 msgstr "ストレージプール %s を削除しました"
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr "ストレージプール名"
 
@@ -1169,11 +1179,11 @@ msgstr "ストレージボリューム %s を削除しました"
 msgid "Store the container state (only for stop)"
 msgstr "コンテナの状態を保存します (stopのみ)"
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr "Swap (現在値)"
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr "Swap (ピーク)"
 
@@ -1193,7 +1203,7 @@ msgstr ""
 "コンテナは現在実行中です。停止して、再起動するために --force を使用してくだ\n"
 "さい。"
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr "起動しようとしたコンテナに接続されているネットワークがありません。"
 
@@ -1206,7 +1216,7 @@ msgstr "デバイスはすでに存在します"
 msgid "The device doesn't exist"
 msgstr "デバイスが存在しません"
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1250,12 +1260,12 @@ msgstr "コンテナを強制停止するまでの時間"
 msgid "Timestamps:"
 msgstr "タイムスタンプ:"
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 "コンテナにネットワークを接続するには、lxc network attach を使用してください"
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 "新しいネットワークを作成するには、lxc network create を使用してください"
@@ -1289,11 +1299,11 @@ msgstr "イメージを転送中: %s"
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr "更に情報を得るために `lxc info --show-log %s` を実行してみてください"
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr "タイプ: ephemeral"
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr "タイプ: persistent"
 
@@ -1935,11 +1945,13 @@ msgstr ""
 "lxc info [<remote>:]\n"
 "    LXD サーバの情報を表示します。"
 
-#: lxc/init.go:77
+#: lxc/init.go:78
+#, fuzzy
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2935,7 +2947,7 @@ msgstr "再帰 (recursive) モードでは uid/gid/mode を指定できません
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 "サーバから変更されたイメージ、コンテナ、スナップショットを取得できませんで\n"
@@ -2996,15 +3008,15 @@ msgstr "リモート %s は <%s> として存在します"
 msgid "remote %s is static and cannot be modified"
 msgstr "リモート %s は static ですので変更できません"
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr "ステートフル"
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr "ステートレス"
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr "%s に取得しました"
diff --git a/po/lxd.pot b/po/lxd.pot
index bd9895e26..e8cdff180 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -201,7 +201,7 @@ msgstr  ""
 msgid   "Aliases:"
 msgstr  ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid   "Architecture: %s"
 msgstr  ""
@@ -221,11 +221,11 @@ msgstr  ""
 msgid   "Bad property: %s"
 msgstr  ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid   "Bytes received"
 msgstr  ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid   "Bytes sent"
 msgstr  ""
 
@@ -237,11 +237,11 @@ msgstr  ""
 msgid   "COMMON NAME"
 msgstr  ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid   "CPU usage (in seconds)"
 msgstr  ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid   "CPU usage:"
 msgstr  ""
 
@@ -302,7 +302,7 @@ msgstr  ""
 msgid   "Commands:"
 msgstr  ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid   "Config key/value to apply to the new container"
 msgstr  ""
 
@@ -323,7 +323,7 @@ msgstr  ""
 msgid   "Container name is mandatory"
 msgstr  ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid   "Container name is: %s"
 msgstr  ""
@@ -358,17 +358,17 @@ msgstr  ""
 msgid   "Create any directories necessary"
 msgstr  ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid   "Created: %s"
 msgstr  ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid   "Creating %s"
 msgstr  ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid   "Creating the container"
 msgstr  ""
 
@@ -415,7 +415,7 @@ msgstr  ""
 msgid   "Disable stdin (reads from /dev/null)"
 msgstr  ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid   "Disk usage:"
 msgstr  ""
 
@@ -443,7 +443,7 @@ msgstr  ""
 msgid   "Environment:"
 msgstr  ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid   "Ephemeral container"
 msgstr  ""
 
@@ -601,7 +601,7 @@ msgstr  ""
 msgid   "Input data"
 msgstr  ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid   "Instance type"
 msgstr  ""
 
@@ -638,7 +638,7 @@ msgstr  ""
 msgid   "Invalid target %s"
 msgstr  ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid   "Ips:"
 msgstr  ""
 
@@ -663,7 +663,7 @@ msgstr  ""
 msgid   "Last used: never"
 msgstr  ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid   "Log:"
 msgstr  ""
 
@@ -679,15 +679,15 @@ msgstr  ""
 msgid   "Make the image public"
 msgstr  ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid   "Memory (current)"
 msgstr  ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid   "Memory (peak)"
 msgstr  ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid   "Memory usage:"
 msgstr  ""
 
@@ -743,11 +743,11 @@ msgstr  ""
 msgid   "Network %s renamed to %s"
 msgstr  ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid   "Network name"
 msgstr  ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid   "Network usage:"
 msgstr  ""
 
@@ -776,6 +776,15 @@ msgstr  ""
 msgid   "Node %s removed"
 msgstr  ""
 
+#: lxc/init.go:149
+msgid   "Node name"
+msgstr  ""
+
+#: lxc/info.go:119
+#, c-format
+msgid   "Node: %s"
+msgstr  ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid   "Only \"custom\" volumes can be attached to containers."
 msgstr  ""
@@ -825,11 +834,11 @@ msgstr  ""
 msgid   "PUBLIC"
 msgstr  ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid   "Packets received"
 msgstr  ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid   "Packets sent"
 msgstr  ""
 
@@ -849,7 +858,7 @@ msgstr  ""
 msgid   "Permission denied, are you in the lxd group?"
 msgstr  ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid   "Pid: %d"
 msgstr  ""
@@ -878,7 +887,7 @@ msgstr  ""
 msgid   "Print verbose information"
 msgstr  ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid   "Processes: %d"
 msgstr  ""
@@ -908,7 +917,7 @@ msgstr  ""
 msgid   "Profile %s renamed to %s"
 msgstr  ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid   "Profile to apply to the new container"
 msgstr  ""
 
@@ -917,7 +926,7 @@ msgstr  ""
 msgid   "Profiles %s applied to %s"
 msgstr  ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid   "Profiles: %s"
 msgstr  ""
@@ -952,7 +961,7 @@ msgstr  ""
 msgid   "Remote operation canceled by user"
 msgstr  ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid   "Remote: %s"
 msgstr  ""
@@ -971,7 +980,7 @@ msgstr  ""
 msgid   "Require user confirmation"
 msgstr  ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid   "Resources:"
 msgstr  ""
 
@@ -983,7 +992,7 @@ msgstr  ""
 msgid   "Retrieve the container's console log"
 msgstr  ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid   "Retrieving image: %s"
 msgstr  ""
@@ -1073,7 +1082,7 @@ msgstr  ""
 msgid   "Size: %.2fMB"
 msgstr  ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid   "Snapshots:"
 msgstr  ""
 
@@ -1095,7 +1104,7 @@ msgstr  ""
 msgid   "Starting %s"
 msgstr  ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid   "Status: %s"
 msgstr  ""
@@ -1127,7 +1136,7 @@ msgstr  ""
 msgid   "Storage pool %s deleted"
 msgstr  ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid   "Storage pool name"
 msgstr  ""
 
@@ -1145,11 +1154,11 @@ msgstr  ""
 msgid   "Store the container state (only for stop)"
 msgstr  ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid   "Swap (current)"
 msgstr  ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid   "Swap (peak)"
 msgstr  ""
 
@@ -1165,7 +1174,7 @@ msgstr  ""
 msgid   "The container is currently running. Use --force to have it stopped and restarted."
 msgstr  ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid   "The container you are starting doesn't have any network attached to it."
 msgstr  ""
 
@@ -1177,7 +1186,7 @@ msgstr  ""
 msgid   "The device doesn't exist"
 msgstr  ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid   "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr  ""
@@ -1213,11 +1222,11 @@ msgstr  ""
 msgid   "Timestamps:"
 msgstr  ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid   "To attach a network to a container, use: lxc network attach"
 msgstr  ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid   "To create a new network, use: lxc network create"
 msgstr  ""
 
@@ -1248,11 +1257,11 @@ msgstr  ""
 msgid   "Try `lxc info --show-log %s` for more info"
 msgstr  ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid   "Type: ephemeral"
 msgstr  ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid   "Type: persistent"
 msgstr  ""
 
@@ -1590,8 +1599,8 @@ msgid   "Usage: lxc info [<remote>:][<container>] [--show-log] [--resources]\n"
         "    For LXD server information."
 msgstr  ""
 
-#: lxc/init.go:77
-msgid   "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+#: lxc/init.go:78
+msgid   "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n <network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target <node>]\n"
         "\n"
         "Create containers from images.\n"
         "\n"
@@ -2103,7 +2112,7 @@ msgstr  ""
 msgid   "default"
 msgstr  ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid   "didn't get any affected image, container or snapshot from server"
 msgstr  ""
 
@@ -2162,15 +2171,15 @@ msgstr  ""
 msgid   "remote %s is static and cannot be modified"
 msgstr  ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid   "stateful"
 msgstr  ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid   "stateless"
 msgstr  ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid   "taken at %s"
 msgstr  ""
diff --git a/po/nb_NO.po b/po/nb_NO.po
index 59b2bf8cb..47eb6e555 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -208,7 +208,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -309,7 +309,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,17 +366,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
@@ -452,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -610,7 +610,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -647,7 +647,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -672,7 +672,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -688,15 +688,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -753,11 +753,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -786,6 +786,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -835,11 +844,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -859,7 +868,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -889,7 +898,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +928,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +937,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -963,7 +972,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -982,7 +991,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -994,7 +1003,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1106,7 +1115,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1138,7 +1147,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1156,11 +1165,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1178,7 +1187,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1191,7 +1200,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1228,11 +1237,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1263,11 +1272,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1641,11 +1650,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2211,7 +2221,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2270,15 +2280,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/nl.po b/po/nl.po
index 190d5cd49..c45c353b4 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -208,7 +208,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -309,7 +309,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,17 +366,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
@@ -452,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -610,7 +610,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -647,7 +647,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -672,7 +672,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -688,15 +688,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -753,11 +753,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -786,6 +786,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -835,11 +844,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -859,7 +868,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -889,7 +898,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +928,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +937,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -963,7 +972,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -982,7 +991,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -994,7 +1003,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1106,7 +1115,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1138,7 +1147,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1156,11 +1165,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1178,7 +1187,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1191,7 +1200,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1228,11 +1237,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1263,11 +1272,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1641,11 +1650,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2211,7 +2221,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2270,15 +2280,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/pt_BR.po b/po/pt_BR.po
index 30d59c628..e090ce3e4 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -208,7 +208,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -309,7 +309,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,17 +366,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
@@ -452,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -610,7 +610,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -647,7 +647,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -672,7 +672,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -688,15 +688,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -753,11 +753,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -786,6 +786,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -835,11 +844,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -859,7 +868,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -889,7 +898,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +928,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +937,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -963,7 +972,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -982,7 +991,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -994,7 +1003,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1106,7 +1115,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1138,7 +1147,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1156,11 +1165,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1178,7 +1187,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1191,7 +1200,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1228,11 +1237,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1263,11 +1272,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1641,11 +1650,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2211,7 +2221,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2270,15 +2280,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/ru.po b/po/ru.po
index da1fea3b2..f8f6f5b90 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -294,7 +294,7 @@ msgstr "Пароль администратора для %s: "
 msgid "Aliases:"
 msgstr "Псевдонимы:"
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr "Архитектура: %s"
@@ -314,11 +314,11 @@ msgstr "Авто-обновление: %s"
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr "Получено байтов"
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr "Отправлено байтов"
 
@@ -330,11 +330,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr "ОБЩЕЕ ИМЯ"
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr "Использование ЦП (в секундах)"
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 #, fuzzy
 msgid "CPU usage:"
 msgstr " Использование ЦП:"
@@ -397,7 +397,7 @@ msgstr "Столбцы"
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -419,7 +419,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr "Имя контейнера является обязательным"
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr "Имя контейнера: %s"
@@ -454,17 +454,17 @@ msgstr "Не удалось создать каталог сертификата
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -512,7 +512,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 #, fuzzy
 msgid "Disk usage:"
 msgstr " Использование диска:"
@@ -541,7 +541,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -699,7 +699,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -736,7 +736,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -761,7 +761,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -777,15 +777,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 #, fuzzy
 msgid "Memory usage:"
 msgstr " Использование памяти:"
@@ -843,11 +843,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 #, fuzzy
 msgid "Network usage:"
 msgstr " Использование сети:"
@@ -877,6 +877,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, fuzzy, c-format
+msgid "Node: %s"
+msgstr "Авто-обновление: %s"
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -926,11 +935,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -950,7 +959,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -980,7 +989,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -1010,7 +1019,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -1019,7 +1028,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -1054,7 +1063,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -1073,7 +1082,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -1085,7 +1094,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1175,7 +1184,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1197,7 +1206,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1229,7 +1238,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1247,11 +1256,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1269,7 +1278,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1282,7 +1291,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1319,11 +1328,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1354,11 +1363,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1740,11 +1749,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2314,7 +2324,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2373,15 +2383,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/sr.po b/po/sr.po
index f57b3c688..d0d496232 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -208,7 +208,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -309,7 +309,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,17 +366,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
@@ -452,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -610,7 +610,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -647,7 +647,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -672,7 +672,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -688,15 +688,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -753,11 +753,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -786,6 +786,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -835,11 +844,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -859,7 +868,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -889,7 +898,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +928,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +937,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -963,7 +972,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -982,7 +991,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -994,7 +1003,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1106,7 +1115,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1138,7 +1147,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1156,11 +1165,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1178,7 +1187,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1191,7 +1200,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1228,11 +1237,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1263,11 +1272,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1641,11 +1650,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2211,7 +2221,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2270,15 +2280,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/sv.po b/po/sv.po
index 80a2f488a..975cba0c7 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -208,7 +208,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -309,7 +309,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,17 +366,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
@@ -452,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -610,7 +610,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -647,7 +647,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -672,7 +672,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -688,15 +688,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -753,11 +753,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -786,6 +786,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -835,11 +844,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -859,7 +868,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -889,7 +898,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +928,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +937,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -963,7 +972,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -982,7 +991,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -994,7 +1003,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1106,7 +1115,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1138,7 +1147,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1156,11 +1165,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1178,7 +1187,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1191,7 +1200,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1228,11 +1237,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1263,11 +1272,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1641,11 +1650,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2211,7 +2221,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2270,15 +2280,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/tr.po b/po/tr.po
index 29890d47a..3f1eb8ae2 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -208,7 +208,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -309,7 +309,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,17 +366,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
@@ -452,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -610,7 +610,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -647,7 +647,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -672,7 +672,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -688,15 +688,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -753,11 +753,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -786,6 +786,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -835,11 +844,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -859,7 +868,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -889,7 +898,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +928,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +937,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -963,7 +972,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -982,7 +991,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -994,7 +1003,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1106,7 +1115,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1138,7 +1147,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1156,11 +1165,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1178,7 +1187,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1191,7 +1200,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1228,11 +1237,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1263,11 +1272,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1641,11 +1650,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2211,7 +2221,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2270,15 +2280,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/zh.po b/po/zh.po
index 1ea3f41d9..d35b9bcf1 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -208,7 +208,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -309,7 +309,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,17 +366,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
@@ -452,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -610,7 +610,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -647,7 +647,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -672,7 +672,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -688,15 +688,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -753,11 +753,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -786,6 +786,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -835,11 +844,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -859,7 +868,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -889,7 +898,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +928,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +937,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -963,7 +972,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -982,7 +991,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -994,7 +1003,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1106,7 +1115,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1138,7 +1147,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1156,11 +1165,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1178,7 +1187,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1191,7 +1200,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1228,11 +1237,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1263,11 +1272,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1641,11 +1650,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2211,7 +2221,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2270,15 +2280,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index bd4c8c3a2..ce61c7c88 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -208,7 +208,7 @@ msgstr ""
 msgid "Aliases:"
 msgstr ""
 
-#: lxc/image.go:600 lxc/info.go:122
+#: lxc/image.go:600 lxc/info.go:125
 #, c-format
 msgid "Architecture: %s"
 msgstr ""
@@ -228,11 +228,11 @@ msgstr ""
 msgid "Bad property: %s"
 msgstr ""
 
-#: lxc/info.go:215
+#: lxc/info.go:218
 msgid "Bytes received"
 msgstr ""
 
-#: lxc/info.go:216
+#: lxc/info.go:219
 msgid "Bytes sent"
 msgstr ""
 
@@ -244,11 +244,11 @@ msgstr ""
 msgid "COMMON NAME"
 msgstr ""
 
-#: lxc/info.go:179
+#: lxc/info.go:182
 msgid "CPU usage (in seconds)"
 msgstr ""
 
-#: lxc/info.go:183
+#: lxc/info.go:186
 msgid "CPU usage:"
 msgstr ""
 
@@ -309,7 +309,7 @@ msgstr ""
 msgid "Commands:"
 msgstr ""
 
-#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:137 lxc/init.go:138
+#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139
 msgid "Config key/value to apply to the new container"
 msgstr ""
 
@@ -331,7 +331,7 @@ msgstr ""
 msgid "Container name is mandatory"
 msgstr ""
 
-#: lxc/copy.go:229 lxc/init.go:317
+#: lxc/copy.go:229 lxc/init.go:320
 #, c-format
 msgid "Container name is: %s"
 msgstr ""
@@ -366,17 +366,17 @@ msgstr ""
 msgid "Create any directories necessary"
 msgstr ""
 
-#: lxc/image.go:605 lxc/info.go:124
+#: lxc/image.go:605 lxc/info.go:127
 #, c-format
 msgid "Created: %s"
 msgstr ""
 
-#: lxc/init.go:196
+#: lxc/init.go:199
 #, c-format
 msgid "Creating %s"
 msgstr ""
 
-#: lxc/init.go:194
+#: lxc/init.go:197
 msgid "Creating the container"
 msgstr ""
 
@@ -424,7 +424,7 @@ msgstr ""
 msgid "Disable stdin (reads from /dev/null)"
 msgstr ""
 
-#: lxc/info.go:172
+#: lxc/info.go:175
 msgid "Disk usage:"
 msgstr ""
 
@@ -452,7 +452,7 @@ msgstr ""
 msgid "Environment:"
 msgstr ""
 
-#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:141 lxc/init.go:142
+#: lxc/copy.go:39 lxc/copy.go:40 lxc/init.go:142 lxc/init.go:143
 msgid "Ephemeral container"
 msgstr ""
 
@@ -610,7 +610,7 @@ msgstr ""
 msgid "Input data"
 msgstr ""
 
-#: lxc/init.go:147
+#: lxc/init.go:148
 msgid "Instance type"
 msgstr ""
 
@@ -647,7 +647,7 @@ msgstr ""
 msgid "Invalid target %s"
 msgstr ""
 
-#: lxc/info.go:153
+#: lxc/info.go:156
 msgid "Ips:"
 msgstr ""
 
@@ -672,7 +672,7 @@ msgstr ""
 msgid "Last used: never"
 msgstr ""
 
-#: lxc/info.go:268
+#: lxc/info.go:271
 msgid "Log:"
 msgstr ""
 
@@ -688,15 +688,15 @@ msgstr ""
 msgid "Make the image public"
 msgstr ""
 
-#: lxc/info.go:190
+#: lxc/info.go:193
 msgid "Memory (current)"
 msgstr ""
 
-#: lxc/info.go:194
+#: lxc/info.go:197
 msgid "Memory (peak)"
 msgstr ""
 
-#: lxc/info.go:206
+#: lxc/info.go:209
 msgid "Memory usage:"
 msgstr ""
 
@@ -753,11 +753,11 @@ msgstr ""
 msgid "Network %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:143 lxc/init.go:144
+#: lxc/init.go:144 lxc/init.go:145
 msgid "Network name"
 msgstr ""
 
-#: lxc/info.go:223
+#: lxc/info.go:226
 msgid "Network usage:"
 msgstr ""
 
@@ -786,6 +786,15 @@ msgstr ""
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/init.go:149
+msgid "Node name"
+msgstr ""
+
+#: lxc/info.go:119
+#, c-format
+msgid "Node: %s"
+msgstr ""
+
 #: lxc/storage.go:355 lxc/storage.go:448
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
@@ -835,11 +844,11 @@ msgstr ""
 msgid "PUBLIC"
 msgstr ""
 
-#: lxc/info.go:217
+#: lxc/info.go:220
 msgid "Packets received"
 msgstr ""
 
-#: lxc/info.go:218
+#: lxc/info.go:221
 msgid "Packets sent"
 msgstr ""
 
@@ -859,7 +868,7 @@ msgstr ""
 msgid "Permission denied, are you in the lxd group?"
 msgstr ""
 
-#: lxc/info.go:135
+#: lxc/info.go:138
 #, c-format
 msgid "Pid: %d"
 msgstr ""
@@ -889,7 +898,7 @@ msgstr ""
 msgid "Print verbose information"
 msgstr ""
 
-#: lxc/info.go:159
+#: lxc/info.go:162
 #, c-format
 msgid "Processes: %d"
 msgstr ""
@@ -919,7 +928,7 @@ msgstr ""
 msgid "Profile %s renamed to %s"
 msgstr ""
 
-#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:139 lxc/init.go:140
+#: lxc/copy.go:37 lxc/copy.go:38 lxc/init.go:140 lxc/init.go:141
 msgid "Profile to apply to the new container"
 msgstr ""
 
@@ -928,7 +937,7 @@ msgstr ""
 msgid "Profiles %s applied to %s"
 msgstr ""
 
-#: lxc/info.go:133
+#: lxc/info.go:136
 #, c-format
 msgid "Profiles: %s"
 msgstr ""
@@ -963,7 +972,7 @@ msgstr ""
 msgid "Remote operation canceled by user"
 msgstr ""
 
-#: lxc/info.go:119
+#: lxc/info.go:122
 #, c-format
 msgid "Remote: %s"
 msgstr ""
@@ -982,7 +991,7 @@ msgstr ""
 msgid "Require user confirmation"
 msgstr ""
 
-#: lxc/info.go:156
+#: lxc/info.go:159
 msgid "Resources:"
 msgstr ""
 
@@ -994,7 +1003,7 @@ msgstr ""
 msgid "Retrieve the container's console log"
 msgstr ""
 
-#: lxc/init.go:289
+#: lxc/init.go:292
 #, c-format
 msgid "Retrieving image: %s"
 msgstr ""
@@ -1084,7 +1093,7 @@ msgstr ""
 msgid "Size: %.2fMB"
 msgstr ""
 
-#: lxc/info.go:237
+#: lxc/info.go:240
 msgid "Snapshots:"
 msgstr ""
 
@@ -1106,7 +1115,7 @@ msgstr ""
 msgid "Starting %s"
 msgstr ""
 
-#: lxc/info.go:127
+#: lxc/info.go:130
 #, c-format
 msgid "Status: %s"
 msgstr ""
@@ -1138,7 +1147,7 @@ msgstr ""
 msgid "Storage pool %s deleted"
 msgstr ""
 
-#: lxc/init.go:145 lxc/init.go:146
+#: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
@@ -1156,11 +1165,11 @@ msgstr ""
 msgid "Store the container state (only for stop)"
 msgstr ""
 
-#: lxc/info.go:198
+#: lxc/info.go:201
 msgid "Swap (current)"
 msgstr ""
 
-#: lxc/info.go:202
+#: lxc/info.go:205
 msgid "Swap (peak)"
 msgstr ""
 
@@ -1178,7 +1187,7 @@ msgid ""
 "restarted."
 msgstr ""
 
-#: lxc/init.go:362
+#: lxc/init.go:365
 msgid "The container you are starting doesn't have any network attached to it."
 msgstr ""
 
@@ -1191,7 +1200,7 @@ msgstr ""
 msgid "The device doesn't exist"
 msgstr ""
 
-#: lxc/init.go:346
+#: lxc/init.go:349
 #, c-format
 msgid "The local image '%s' couldn't be found, trying '%s:' instead."
 msgstr ""
@@ -1228,11 +1237,11 @@ msgstr ""
 msgid "Timestamps:"
 msgstr ""
 
-#: lxc/init.go:364
+#: lxc/init.go:367
 msgid "To attach a network to a container, use: lxc network attach"
 msgstr ""
 
-#: lxc/init.go:363
+#: lxc/init.go:366
 msgid "To create a new network, use: lxc network create"
 msgstr ""
 
@@ -1263,11 +1272,11 @@ msgstr ""
 msgid "Try `lxc info --show-log %s` for more info"
 msgstr ""
 
-#: lxc/info.go:129
+#: lxc/info.go:132
 msgid "Type: ephemeral"
 msgstr ""
 
-#: lxc/info.go:131
+#: lxc/info.go:134
 msgid "Type: persistent"
 msgstr ""
 
@@ -1641,11 +1650,12 @@ msgid ""
 "    For LXD server information."
 msgstr ""
 
-#: lxc/init.go:77
+#: lxc/init.go:78
 msgid ""
 "Usage: lxc init [<remote>:]<image> [<remote>:][<name>] [--ephemeral|-e] [--"
 "profile|-p <profile>...] [--config|-c <key=value>...] [--network|-n "
-"<network>] [--storage|-s <pool>] [--type|-t <instance type>]\n"
+"<network>] [--storage|-s <pool>] [--type|-t <instance type>] [--target "
+"<node>]\n"
 "\n"
 "Create containers from images.\n"
 "\n"
@@ -2211,7 +2221,7 @@ msgstr ""
 msgid "default"
 msgstr ""
 
-#: lxc/init.go:311
+#: lxc/init.go:314
 msgid "didn't get any affected image, container or snapshot from server"
 msgstr ""
 
@@ -2270,15 +2280,15 @@ msgstr ""
 msgid "remote %s is static and cannot be modified"
 msgstr ""
 
-#: lxc/info.go:248
+#: lxc/info.go:251
 msgid "stateful"
 msgstr ""
 
-#: lxc/info.go:250
+#: lxc/info.go:253
 msgid "stateless"
 msgstr ""
 
-#: lxc/info.go:244
+#: lxc/info.go:247
 #, c-format
 msgid "taken at %s"
 msgstr ""
diff --git a/shared/api/container.go b/shared/api/container.go
index 1ecfff755..11c9bf2f6 100644
--- a/shared/api/container.go
+++ b/shared/api/container.go
@@ -70,6 +70,9 @@ type Container struct {
 
 	// API extension: container_last_used_at
 	LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"`
+
+	// API extension: clustering
+	Node string `json:"node" yaml:"node"`
 }
 
 // Writable converts a full Container struct into a ContainerPut struct (filters read-only fields)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 05535f9b6..b40eceb1b 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -122,6 +122,7 @@ test_clustering_containers() {
   LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
   LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 testimage foo
   LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo
+  LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 

From 4af0b31d23099bb443efa124987c826c532e28c1 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 20 Nov 2017 08:47:18 +0000
Subject: [PATCH 094/116] Add cluster.Connect convenience to connect to cluster
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_cluster.go     |  7 +------
 lxd/cluster/connect.go | 28 ++++++++++++++++++++++++++++
 lxd/cluster/events.go  | 13 +------------
 lxd/cluster/notify.go  | 15 +--------------
 lxd/containers_post.go |  9 +--------
 lxd/operations.go      | 10 ++--------
 6 files changed, 34 insertions(+), 48 deletions(-)
 create mode 100644 lxd/cluster/connect.go

diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 30de8bb4d..cad2a6dcf 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -362,15 +362,10 @@ func clusterNodeDelete(d *Daemon, r *http.Request) Response {
 	} else {
 		// Try to gracefully disable clustering on the target node.
 		cert := d.endpoints.NetworkCert()
-		args := &lxd.ConnectionArgs{
-			TLSServerCert: string(cert.PublicKey()),
-			TLSClientCert: string(cert.PublicKey()),
-			TLSClientKey:  string(cert.PrivateKey()),
-		}
 		run = func(op *operation) error {
 			// First request for this node to be added to the list of
 			// cluster nodes.
-			client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", address), args)
+			client, err := cluster.Connect(address, cert, false)
 			if err != nil {
 				return err
 			}
diff --git a/lxd/cluster/connect.go b/lxd/cluster/connect.go
new file mode 100644
index 000000000..f8b917c69
--- /dev/null
+++ b/lxd/cluster/connect.go
@@ -0,0 +1,28 @@
+package cluster
+
+import (
+	"fmt"
+
+	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/shared"
+)
+
+// Connect is a convenience around lxd.ConnectLXD that configures the client
+// with the correct parameters for node-to-node communication.
+//
+// If 'notify' switch is true, then the user agent will be set to the special
+// value 'lxd-cluster-notifier', which can be used in some cases to distinguish
+// between a regular client request and an internal cluster request.
+func Connect(address string, cert *shared.CertInfo, notify bool) (lxd.ContainerServer, error) {
+	args := &lxd.ConnectionArgs{
+		TLSServerCert: string(cert.PublicKey()),
+		TLSClientCert: string(cert.PublicKey()),
+		TLSClientKey:  string(cert.PrivateKey()),
+	}
+	if notify {
+		args.UserAgent = "lxd-cluster-notifier"
+	}
+
+	url := fmt.Sprintf("https://%s", address)
+	return lxd.ConnectLXD(url, args)
+}
diff --git a/lxd/cluster/events.go b/lxd/cluster/events.go
index fe02df4f7..9b72d1f20 100644
--- a/lxd/cluster/events.go
+++ b/lxd/cluster/events.go
@@ -1,7 +1,6 @@
 package cluster
 
 import (
-	"fmt"
 	"time"
 
 	lxd "github.com/lxc/lxd/client"
@@ -86,17 +85,7 @@ func Events(endpoints *endpoints.Endpoints, cluster *db.Cluster, f func(int64, i
 
 // Establish a client connection to get events from the given node.
 func eventsConnect(address string, cert *shared.CertInfo) (*lxd.EventListener, error) {
-	args := &lxd.ConnectionArgs{
-		TLSServerCert: string(cert.PublicKey()),
-		TLSClientCert: string(cert.PublicKey()),
-		TLSClientKey:  string(cert.PrivateKey()),
-		// Use a special user agent to let the events API handler know that
-		// it should only notify us of local events.
-		UserAgent: "lxd-cluster-notifier",
-	}
-
-	url := fmt.Sprintf("https://%s", address)
-	client, err := lxd.ConnectLXD(url, args)
+	client, err := Connect(address, cert, true)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/cluster/notify.go b/lxd/cluster/notify.go
index cb5a69a76..7cdbb1766 100644
--- a/lxd/cluster/notify.go
+++ b/lxd/cluster/notify.go
@@ -68,19 +68,6 @@ func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolic
 		return nil, err
 	}
 
-	// Client parameters to connect to a peer cluster node.
-	args := &lxd.ConnectionArgs{
-		TLSServerCert: string(cert.PublicKey()),
-		TLSClientCert: string(cert.PublicKey()),
-		TLSClientKey:  string(cert.PrivateKey()),
-		// Use a special user agent to let the API handlers know they
-		// should not do any database work.
-		UserAgent: "lxd-cluster-notifier",
-	}
-	if cert.CA() != nil {
-		args.TLSCA = string(cert.CA().Raw)
-	}
-
 	notifier := func(hook func(lxd.ContainerServer) error) error {
 		errs := make([]error, len(peers))
 		wg := sync.WaitGroup{}
@@ -89,7 +76,7 @@ func NewNotifier(state *state.State, cert *shared.CertInfo, policy NotifierPolic
 			logger.Debugf("Notify node %s of state changes", address)
 			go func(i int, address string) {
 				defer wg.Done()
-				client, err := lxd.ConnectLXD(fmt.Sprintf("https://%s", address), args)
+				client, err := Connect(address, cert, true)
 				if err != nil {
 					errs[i] = errors.Wrapf(err, "failed to connect to peer %s", address)
 					return
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 66053f7ef..2541edbf6 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -11,7 +11,6 @@ import (
 	"github.com/dustinkirkland/golang-petname"
 	"github.com/gorilla/websocket"
 
-	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/types"
@@ -542,13 +541,7 @@ func containersPost(d *Daemon, r *http.Request) Response {
 		}
 		if address != "" {
 			cert := d.endpoints.NetworkCert()
-			args := &lxd.ConnectionArgs{
-				TLSServerCert: string(cert.PublicKey()),
-				TLSClientCert: string(cert.PublicKey()),
-				TLSClientKey:  string(cert.PrivateKey()),
-			}
-			url := fmt.Sprintf("https://%s", address)
-			client, err := lxd.ConnectLXD(url, args)
+			client, err := cluster.Connect(address, cert, false)
 			if err != nil {
 				return SmartError(err)
 			}
diff --git a/lxd/operations.go b/lxd/operations.go
index cdc5deb75..46ae99597 100644
--- a/lxd/operations.go
+++ b/lxd/operations.go
@@ -12,7 +12,7 @@ import (
 	"github.com/pborman/uuid"
 	"github.com/pkg/errors"
 
-	lxd "github.com/lxc/lxd/client"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared"
@@ -483,13 +483,7 @@ func operationAPIGet(d *Daemon, r *http.Request) Response {
 			return SmartError(err)
 		}
 		cert := d.endpoints.NetworkCert()
-		args := &lxd.ConnectionArgs{
-			TLSServerCert: string(cert.PublicKey()),
-			TLSClientCert: string(cert.PublicKey()),
-			TLSClientKey:  string(cert.PrivateKey()),
-		}
-		url := fmt.Sprintf("https://%s", address)
-		client, err := lxd.ConnectLXD(url, args)
+		client, err := cluster.Connect(address, cert, false)
 		if err != nil {
 			return SmartError(err)
 		}

From df656dd6eea445bdc7c1eafa4611bff2fda7a8a0 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 21 Nov 2017 07:57:05 +0000
Subject: [PATCH 095/116] Use unshare/nsenter instead of ip netns for isolating
 test nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/includes/clustering.sh | 34 +++++++++++++++++++++-------------
 test/includes/lxd.sh        |  2 +-
 test/suites/clustering.sh   |  1 -
 3 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/test/includes/clustering.sh b/test/includes/clustering.sh
index a87af60df..0695ba9ab 100644
--- a/test/includes/clustering.sh
+++ b/test/includes/clustering.sh
@@ -35,7 +35,10 @@ setup_clustering_netns() {
 
   echo "==> Setup clustering netns ${ns}"
 
-  ip netns add "${ns}"
+  mkdir -p /run/netns
+  touch "/run/netns/${ns}"
+
+  unshare -n sh -c "mount --bind /proc/self/ns/net /run/netns/${ns}"
 
   veth1="v${ns}1"
   veth2="v${ns}2"
@@ -43,31 +46,36 @@ setup_clustering_netns() {
   ip link add "${veth1}" type veth peer name "${veth2}"
   ip link set "${veth2}" netns "${ns}"
 
-  bridge="br$$"
-  brctl addif "${bridge}" "${veth1}"
+  nsbridge="br$$"
+  brctl addif "${nsbridge}" "${veth1}"
 
   ip link set "${veth1}" up
 
-  ip netns exec "${ns}" ip link set dev lo up
-  ip netns exec "${ns}" ip link set dev "${veth2}" name eth0
-  ip netns exec "${ns}" ip link set eth0 up
-  ip netns exec "${ns}" ip addr add "10.1.1.10${id}/16" dev eth0
-  ip netns exec "${ns}" ip route add default via 10.1.1.1
+  (
+    cat <<EOF
+    ip link set dev lo up
+    ip link set dev "${veth2}" name eth0
+    ip link set eth0 up
+    ip addr add "10.1.1.10${id}/16" dev eth0
+    ip route add default via 10.1.1.1
+EOF
+  ) | nsenter --net="/run/netns/${ns}" sh
 }
 
 teardown_clustering_netns() {
   prefix="lxd$$"
-  bridge="br$$"
+  nsbridge="br$$"
   for ns in $(ip netns | grep "${prefix}" | cut -f 1 -d " ") ; do
       echo "==> Teardown clustering netns ${ns}"
       veth1="v${ns}1"
       veth2="v${ns}2"
-      ip netns exec "${ns}" ip link set eth0 down
-      ip netns exec "${ns}" ip link set lo down
+      nsenter --net="/run/netns/${ns}" ip link set eth0 down
+      nsenter --net="/run/netns/${ns}" ip link set lo down
       ip link set "${veth1}" down
-      brctl delif "${bridge}" "${veth1}"
+      brctl delif "${nsbridge}" "${veth1}"
       ip link delete "${veth1}" type veth
-      ip netns delete "${ns}"
+      umount "/run/netns/${ns}"
+      rm "/run/netns/${ns}"
   done
 }
 
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index a1b553788..ec82823ee 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -43,7 +43,7 @@ spawn_lxd() {
     if [ "${LXD_NETNS}" = "" ]; then
 	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     else
-	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" ip netns exec "${LXD_NETNS}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
+	LD_LIBRARY_PATH="${sqlite}" LXD_DIR="${lxddir}" nsenter --net="/run/netns/${LXD_NETNS}" lxd --logfile "${lxddir}/lxd.log" "${DEBUG-}" "$@" 2>&1 &
     fi
     LXD_PID=$!
     echo "${LXD_PID}" > "${lxddir}/lxd.pid"
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index b40eceb1b..60039e2f4 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -132,4 +132,3 @@ test_clustering_containers() {
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
 }
-

From aaf53f65e6ff39a7f8daeaf8e4055df2b88133c3 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 21 Nov 2017 08:54:41 +0000
Subject: [PATCH 096/116] Make it possible to start a container on a different
 node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_state.go    | 24 ++++++++++++++++++++++++
 lxd/db/containers.go      | 43 +++++++++++++++++++++++++++++++++++++++++++
 test/suites/clustering.sh |  4 ++++
 3 files changed, 71 insertions(+)

diff --git a/lxd/container_state.go b/lxd/container_state.go
index 306fbca74..ca7423b89 100644
--- a/lxd/container_state.go
+++ b/lxd/container_state.go
@@ -8,6 +8,7 @@ import (
 
 	"github.com/gorilla/mux"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
@@ -44,6 +45,29 @@ func containerStatePut(d *Daemon, r *http.Request) Response {
 	// Don't mess with containers while in setup mode
 	<-d.readyChan
 
+	// Handle requests targeted to a container on a different node
+	var nodeAddress string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		nodeAddress, err = tx.ContainerNodeAddress(name)
+		return err
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+	if nodeAddress != "" {
+		cert := d.endpoints.NetworkCert()
+		client, err := cluster.Connect(nodeAddress, cert, false)
+		if err != nil {
+			return SmartError(err)
+		}
+		op, err := client.UpdateContainerState(name, raw, "")
+		if err != nil {
+			return SmartError(err)
+		}
+		return ForwardedOperationResponse(&op.Operation)
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index c2ef39d0b..994b187d3 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -41,6 +41,49 @@ const (
 	CTypeSnapshot ContainerType = 1
 )
 
+// ContainerNodeAddress returns the address of the node hosting the container
+// with the given name.
+//
+// It returns the empty string if the container is hosted on this node.
+func (c *ClusterTx) ContainerNodeAddress(name string) (string, error) {
+	stmt := `
+SELECT nodes.id, nodes.address
+  FROM nodes JOIN containers ON containers.node_id = nodes.id
+    WHERE containers.name = ?
+`
+	var address string
+	var id int64
+	rows, err := c.tx.Query(stmt, name)
+	if err != nil {
+		return "", err
+	}
+	defer rows.Close()
+
+	if !rows.Next() {
+		return "", NoSuchObjectError
+	}
+
+	err = rows.Scan(&id, &address)
+	if err != nil {
+		return "", err
+	}
+
+	if rows.Next() {
+		return "", fmt.Errorf("more than one node associated with container")
+	}
+
+	err = rows.Err()
+	if err != nil {
+		return "", err
+	}
+
+	if id == c.nodeID {
+		return "", nil
+	}
+
+	return address, nil
+}
+
 func (c *Cluster) ContainerRemove(name string) error {
 	id, err := c.ContainerId(name)
 	if err != nil {
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 60039e2f4..a0dd6a27f 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -124,6 +124,10 @@ test_clustering_containers() {
   LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
+  LXD_DIR="${LXD_ONE_DIR}" lxc start foo
+  LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"
+  LXD_DIR="${LXD_ONE_DIR}" lxc stop foo
+
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown

From 23334f1ec9db4eff230edf6fb00e729e58d87325 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 21 Nov 2017 11:22:13 +0000
Subject: [PATCH 097/116] Add "lxc cluster rename" command and related API

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |  1 +
 client/lxd_cluster.go     |  7 +++++++
 doc/rest-api.md           | 14 ++++++++++++++
 lxc/cluster.go            | 44 +++++++++++++++++++++++++++++++++++---------
 lxd/api_cluster.go        | 25 ++++++++++++++++++++++++-
 lxd/api_cluster_test.go   | 23 +++++++++++++++++++++++
 lxd/db/node.go            | 26 ++++++++++++++++++++++++++
 lxd/db/node_test.go       | 19 +++++++++++++++++++
 po/de.po                  | 24 ++++++++++++++++--------
 po/el.po                  | 24 ++++++++++++++++--------
 po/fr.po                  | 24 ++++++++++++++++--------
 po/id.po                  | 24 ++++++++++++++++--------
 po/it.po                  | 24 ++++++++++++++++--------
 po/ja.po                  | 24 ++++++++++++++++--------
 po/lxd.pot                | 24 ++++++++++++++++--------
 po/nb_NO.po               | 24 ++++++++++++++++--------
 po/nl.po                  | 24 ++++++++++++++++--------
 po/pt_BR.po               | 24 ++++++++++++++++--------
 po/ru.po                  | 24 ++++++++++++++++--------
 po/sr.po                  | 24 ++++++++++++++++--------
 po/sv.po                  | 24 ++++++++++++++++--------
 po/tr.po                  | 24 ++++++++++++++++--------
 po/zh.po                  | 24 ++++++++++++++++--------
 po/zh_Hans.po             | 24 ++++++++++++++++--------
 shared/api/cluster.go     |  9 +++++++++
 test/suites/clustering.sh |  5 ++++-
 26 files changed, 418 insertions(+), 139 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index a97ad3750..9ef0030a0 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -169,6 +169,7 @@ type ContainerServer interface {
 	LeaveCluster(name string, force bool) (err error)
 	GetNodes() (nodes []api.Node, err error)
 	GetNode(name string) (node *api.Node, err error)
+	RenameNode(name string, node api.NodePost) (err error)
 
 	// Internal functions (for internal use)
 	RawQuery(method string, path string, data interface{}, queryETag string) (resp *api.Response, ETag string, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 3c12da1d0..93e9a6d6b 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -107,3 +107,10 @@ func (r *ProtocolLXD) GetNode(name string) (*api.Node, error) {
 
 	return &node, nil
 }
+
+// RenameNode changes the name of an existing node
+func (r *ProtocolLXD) RenameNode(name string, node api.NodePost) error {
+	url := fmt.Sprintf("/cluster/nodes/%s", name)
+	_, _, err := r.query("POST", url, node, "")
+	return err
+}
diff --git a/doc/rest-api.md b/doc/rest-api.md
index 4010278d1..f4217cd90 100644
--- a/doc/rest-api.md
+++ b/doc/rest-api.md
@@ -2663,6 +2663,20 @@ of the cluster certificate:
         }
     }
 
+## `/1.0/cluster/nodes/<name>`
+### POST
+ * Description: rename a cluster node
+ * Introduced: with API extension `clustering`
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "name": "node1"
+    }
+
+
 ### DELETE (optional `?force=1`)
  * Description: remove a node from the cluster
  * Introduced: with API extension `clustering`
diff --git a/lxc/cluster.go b/lxc/cluster.go
index 041bb7c7f..02e5d833c 100644
--- a/lxc/cluster.go
+++ b/lxc/cluster.go
@@ -8,6 +8,7 @@ import (
 	yaml "gopkg.in/yaml.v2"
 
 	"github.com/lxc/lxd/lxc/config"
+	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/gnuflag"
 	"github.com/lxc/lxd/shared/i18n"
 	"github.com/olekukonko/tablewriter"
@@ -29,6 +30,9 @@ lxc cluster list [<remote>:]
 lxc cluster show [<remote>:]<node>
     Show details of a node.
 
+lxc cluster rename [<remote>:]<node> <new-name>
+    Rename a cluster node.
+
 lxc cluster delete [<remote>:]<node> [--force]
     Delete a node from the cluster.`)
 }
@@ -46,15 +50,14 @@ func (c *clusterCmd) run(conf *config.Config, args []string) error {
 		return errUsage
 	}
 
-	if args[0] == "list" {
+	switch args[0] {
+	case "list":
 		return c.doClusterList(conf, args)
-	}
-
-	if args[0] == "show" {
+	case "show":
 		return c.doClusterNodeShow(conf, args)
-	}
-
-	if args[0] == "delete" {
+	case "rename":
+		return c.doClusterNodeRename(conf, args)
+	case "delete":
 		return c.doClusterNodeDelete(conf, args)
 	}
 
@@ -66,7 +69,6 @@ func (c *clusterCmd) doClusterNodeShow(conf *config.Config, args []string) error
 		return errArgs
 	}
 
-	// [[lxc cluster]] remove production:bionic-1
 	remote, name, err := conf.ParseRemote(args[1])
 	if err != nil {
 		return err
@@ -92,12 +94,36 @@ func (c *clusterCmd) doClusterNodeShow(conf *config.Config, args []string) error
 	return nil
 }
 
+func (c *clusterCmd) doClusterNodeRename(conf *config.Config, args []string) error {
+	if len(args) < 3 {
+		return errArgs
+	}
+	newName := args[2]
+
+	remote, name, err := conf.ParseRemote(args[1])
+	if err != nil {
+		return err
+	}
+
+	client, err := conf.GetContainerServer(remote)
+	if err != nil {
+		return err
+	}
+
+	err = client.RenameNode(name, api.NodePost{Name: newName})
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf(i18n.G("Node %s renamed to %s")+"\n", name, newName)
+	return nil
+}
+
 func (c *clusterCmd) doClusterNodeDelete(conf *config.Config, args []string) error {
 	if len(args) < 2 {
 		return errArgs
 	}
 
-	// [[lxc cluster]] remove production:bionic-1
 	remote, name, err := conf.ParseRemote(args[1])
 	if err != nil {
 		return err
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index cad2a6dcf..1b1ca19ce 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -299,12 +299,14 @@ func clusterNodesGet(d *Daemon, r *http.Request) Response {
 var clusterNodeCmd = Command{
 	name:   "cluster/nodes/{name}",
 	get:    clusterNodeGet,
+	post:   clusterNodePost,
 	delete: clusterNodeDelete,
 }
 
 func clusterNodeGet(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
-	node := api.Node{Name: name}
+	node := api.Node{}
+	node.Name = name
 	address := ""
 	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
 		dbNode, err := tx.NodeByName(name)
@@ -339,6 +341,27 @@ func clusterNodeGet(d *Daemon, r *http.Request) Response {
 	return SyncResponse(true, node)
 }
 
+func clusterNodePost(d *Daemon, r *http.Request) Response {
+	name := mux.Vars(r)["name"]
+
+	req := api.NodePost{}
+
+	// Parse the request
+	err := json.NewDecoder(r.Body).Decode(&req)
+	if err != nil {
+		return BadRequest(err)
+	}
+
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		return tx.NodeRename(name, req.Name)
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
+	return EmptySyncResponse
+}
+
 func clusterNodeDelete(d *Daemon, r *http.Request) Response {
 	force, err := strconv.Atoi(r.FormValue("force"))
 	if err != nil {
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index a6d18e7cf..c77cc6ca7 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -7,6 +7,7 @@ import (
 	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -166,6 +167,28 @@ func TestCluster_Leave(t *testing.T) {
 	require.NoError(t, err)
 }
 
+// A LXD node can be renamed.
+func TestCluster_NodeRename(t *testing.T) {
+	daemon, cleanup := newDaemon(t)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.EnableNetworking(daemon, "")
+
+	client := f.ClientUnix(daemon)
+
+	op, err := client.BootstrapCluster("buzz")
+	require.NoError(t, err)
+	require.NoError(t, op.Wait())
+
+	node := api.NodePost{Name: "rusp"}
+	err = client.RenameNode("buzz", node)
+	require.NoError(t, err)
+
+	_, err = client.GetNode("rusp")
+	require.NoError(t, err)
+}
+
 // Test helper for cluster-related APIs.
 type clusterFixture struct {
 	t       *testing.T
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 4b42ec1cc..01d151f5d 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -69,6 +69,32 @@ func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
 	return c.nodes("")
 }
 
+// NodeRename changes the name of an existing node.
+//
+// Return an error if a node with the same name already exists.
+func (c *ClusterTx) NodeRename(old, new string) error {
+	count, err := query.Count(c.tx, "nodes", "name=?", new)
+	if err != nil {
+		return errors.Wrap(err, "failed to check existing nodes")
+	}
+	if count != 0 {
+		return DbErrAlreadyDefined
+	}
+	stmt := `UPDATE nodes SET name=? WHERE name=?`
+	result, err := c.tx.Exec(stmt, new, old)
+	if err != nil {
+		return errors.Wrap(err, "failed to update node name")
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return errors.Wrap(err, "failed to get rows count")
+	}
+	if n != 1 {
+		return fmt.Errorf("expected to update one row, not %d", n)
+	}
+	return nil
+}
+
 // Nodes returns all LXD nodes part of the cluster.
 func (c *ClusterTx) nodes(where string, args ...interface{}) ([]NodeInfo, error) {
 	nodes := []NodeInfo{}
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 22ee430d9..84a6bceff 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -37,6 +37,25 @@ func TestNodeAdd(t *testing.T) {
 	assert.Equal(t, "buzz", node.Name)
 }
 
+// Rename a node
+func TestNodeRename(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+	err = tx.NodeRename("buzz", "rusp")
+	require.NoError(t, err)
+	node, err := tx.NodeByName("rusp")
+	require.NoError(t, err)
+	assert.Equal(t, "rusp", node.Name)
+
+	_, err = tx.NodeAdd("buzz", "5.6.7.8:666")
+	require.NoError(t, err)
+	err = tx.NodeRename("rusp", "buzz")
+	assert.Equal(t, db.DbErrAlreadyDefined, err)
+}
+
 // Remove a new raft node.
 func TestNodeRemove(t *testing.T) {
 	tx, cleanup := db.NewTestClusterTx(t)
diff --git a/po/de.po b/po/de.po
index b8c223372..382df103f 100644
--- a/po/de.po
+++ b/po/de.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: 2017-02-14 17:11+0000\n"
 "Last-Translator: Tim Rose <tim at netlope.de>\n"
 "Language-Team: German <https://hosted.weblate.org/projects/linux-containers/"
@@ -498,7 +498,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -658,7 +658,7 @@ msgstr "Fingerabdruck: %s\n"
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -857,7 +857,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "der Name des Ursprung Containers muss angegeben werden"
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -918,11 +918,16 @@ msgstr "Kein Zertifikat für diese Verbindung"
 msgid "No fingerprint specified."
 msgstr "Kein Fingerabdruck angegeben."
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Gerät %s wurde von %s entfernt\n"
 
+#: lxc/cluster.go:118
+#, fuzzy, c-format
+msgid "Node %s renamed to %s"
+msgstr "Profil %s wurde auf %s angewandt\n"
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1165,7 +1170,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1441,7 +1446,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1481,7 +1486,7 @@ msgstr ""
 "Benutzung: lxc [Unterbefehl] [Optionen]\n"
 "Verfügbare Befehle:\n"
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1493,6 +1498,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/el.po b/po/el.po
index 4282e98fd..9691fad16 100644
--- a/po/el.po
+++ b/po/el.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: 2017-02-14 08:00+0000\n"
 "Last-Translator: Simos Xenitellis <simos.65 at gmail.com>\n"
 "Language-Team: Greek <https://hosted.weblate.org/projects/linux-containers/"
@@ -384,7 +384,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -538,7 +538,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -730,7 +730,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -788,11 +788,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1027,7 +1032,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1291,7 +1296,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1325,7 +1330,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1337,6 +1342,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/fr.po b/po/fr.po
index eca915eed..114db0c9a 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: 2017-10-26 15:46+0000\n"
 "Last-Translator: Alban Vidal <alban.vidal at zordhak.fr>\n"
 "Language-Team: French <https://hosted.weblate.org/projects/linux-containers/"
@@ -485,7 +485,7 @@ msgstr "Création de %s"
 msgid "Creating the container"
 msgstr "Création du conteneur"
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -642,7 +642,7 @@ msgstr "Empreinte : %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "Forcer l'allocation de pseudo-terminal "
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -843,7 +843,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "Vous devez fournir le nom d'un conteneur pour : "
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr "NOM"
@@ -902,11 +902,16 @@ msgstr "Aucun périphérique existant pour ce réseau"
 msgid "No fingerprint specified."
 msgstr "Aucune empreinte n'a été indiquée."
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "Profil %s supprimé de %s"
 
+#: lxc/cluster.go:118
+#, fuzzy, c-format
+msgid "Node %s renamed to %s"
+msgstr "Profil %s ajouté à %s"
+
 #: lxc/init.go:149
 #, fuzzy
 msgid "Node name"
@@ -1146,7 +1151,7 @@ msgstr "INSTANTANÉS"
 msgid "SOURCE"
 msgstr "SOURCE"
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr "ÉTAT"
 
@@ -1427,7 +1432,7 @@ msgstr "Type : persistant"
 msgid "UPLOAD DATE"
 msgstr "DATE DE PUBLICATION"
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr "URL"
 
@@ -1464,7 +1469,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "Utilisation : lxc <commande> [options]"
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1476,6 +1481,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/id.po b/po/id.po
index 47bd4546a..99ffcd30c 100644
--- a/po/id.po
+++ b/po/id.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,11 +781,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1020,7 +1025,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1284,7 +1289,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1318,7 +1323,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1330,6 +1335,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/it.po b/po/it.po
index 41c6d6bd4..ee90596fc 100644
--- a/po/it.po
+++ b/po/it.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: 2017-08-18 14:22+0000\n"
 "Last-Translator: Alberto Donato <alberto.donato at gmail.com>\n"
 "Language-Team: Italian <https://hosted.weblate.org/projects/linux-containers/"
@@ -405,7 +405,7 @@ msgstr "Creazione di %s in corso"
 msgid "Creating the container"
 msgstr "Creazione del container in corso"
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -559,7 +559,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -750,7 +750,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -807,11 +807,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1046,7 +1051,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1310,7 +1315,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1344,7 +1349,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1356,6 +1361,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/ja.po b/po/ja.po
index 73b4b86ec..8a1f9fea3 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: 2017-09-28 20:29+0000\n"
 "Last-Translator: KATOH Yasufumi <karma at jazz.email.ne.jp>\n"
 "Language-Team: Japanese <https://hosted.weblate.org/projects/linux-"
@@ -386,7 +386,7 @@ msgstr "%s を作成中"
 msgid "Creating the container"
 msgstr "コンテナを作成中"
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -541,7 +541,7 @@ msgstr "証明書のフィンガープリント: %s"
 msgid "Force pseudo-terminal allocation"
 msgstr "強制的に擬似端末を割り当てます"
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -735,7 +735,7 @@ msgstr "ディレクトリからのインポートは root で実行する必要
 msgid "Must supply container name for: "
 msgstr "コンテナ名を指定する必要があります: "
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -792,11 +792,16 @@ msgstr "このストレージボリュームに対するデバイスがありま
 msgid "No fingerprint specified."
 msgstr "フィンガープリントが指定されていません。"
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, fuzzy, c-format
 msgid "Node %s removed"
 msgstr "プロファイル %s が %s から削除されました"
 
+#: lxc/cluster.go:118
+#, fuzzy, c-format
+msgid "Node %s renamed to %s"
+msgstr "プロファイル %s が %s に追加されました"
+
 #: lxc/init.go:149
 #, fuzzy
 msgid "Node name"
@@ -1033,7 +1038,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1311,7 +1316,7 @@ msgstr "タイプ: persistent"
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1348,7 +1353,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr "使い方: lxc <コマンド> [オプション]"
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1360,6 +1365,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/lxd.pot b/po/lxd.pot
index e8cdff180..49ddc1810 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -7,7 +7,7 @@
 msgid   ""
 msgstr  "Project-Id-Version: lxd\n"
         "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-        "POT-Creation-Date: 2017-12-04 08:59+0000\n"
+        "POT-Creation-Date: 2017-12-04 09:00+0000\n"
         "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
         "Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
         "Language-Team: LANGUAGE <LL at li.org>\n"
@@ -372,7 +372,7 @@ msgstr  ""
 msgid   "Creating the container"
 msgstr  ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid   "DATABASE"
 msgstr  ""
 
@@ -524,7 +524,7 @@ msgstr  ""
 msgid   "Force pseudo-terminal allocation"
 msgstr  ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid   "Force removing a node, even if degraded"
 msgstr  ""
 
@@ -715,7 +715,7 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid   "NAME"
 msgstr  ""
 
@@ -771,11 +771,16 @@ msgstr  ""
 msgid   "No fingerprint specified."
 msgstr  ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid   "Node %s removed"
 msgstr  ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid   "Node %s renamed to %s"
+msgstr  ""
+
 #: lxc/init.go:149
 msgid   "Node name"
 msgstr  ""
@@ -1009,7 +1014,7 @@ msgstr  ""
 msgid   "SOURCE"
 msgstr  ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid   "STATE"
 msgstr  ""
 
@@ -1269,7 +1274,7 @@ msgstr  ""
 msgid   "UPLOAD DATE"
 msgstr  ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid   "URL"
 msgstr  ""
 
@@ -1302,7 +1307,7 @@ msgstr  ""
 msgid   "Usage: lxc <command> [options]"
 msgstr  ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid   "Usage: lxc cluster <subcommand> [options]\n"
         "\n"
         "Manage cluster nodes.\n"
@@ -1313,6 +1318,9 @@ msgid   "Usage: lxc cluster <subcommand> [options]\n"
         "lxc cluster show [<remote>:]<node>\n"
         "    Show details of a node.\n"
         "\n"
+        "lxc cluster rename [<remote>:]<node> <new-name>\n"
+        "    Rename a cluster node.\n"
+        "\n"
         "lxc cluster delete [<remote>:]<node> [--force]\n"
         "    Delete a node from the cluster."
 msgstr  ""
diff --git a/po/nb_NO.po b/po/nb_NO.po
index 47eb6e555..391cc26b5 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,11 +781,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1020,7 +1025,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1284,7 +1289,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1318,7 +1323,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1330,6 +1335,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/nl.po b/po/nl.po
index c45c353b4..f8c257ac2 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,11 +781,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1020,7 +1025,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1284,7 +1289,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1318,7 +1323,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1330,6 +1335,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/pt_BR.po b/po/pt_BR.po
index e090ce3e4..2938e6088 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,11 +781,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1020,7 +1025,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1284,7 +1289,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1318,7 +1323,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1330,6 +1335,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/ru.po b/po/ru.po
index f8f6f5b90..4cbba4cc1 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: 2017-09-05 16:48+0000\n"
 "Last-Translator: Ilya Yakimavets <ilya.yakimavets at backend.expert>\n"
 "Language-Team: Russian <https://hosted.weblate.org/projects/linux-containers/"
@@ -468,7 +468,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -622,7 +622,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -814,7 +814,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -872,11 +872,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1111,7 +1116,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1375,7 +1380,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1412,7 +1417,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1424,6 +1429,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/sr.po b/po/sr.po
index d0d496232..fad733349 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,11 +781,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1020,7 +1025,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1284,7 +1289,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1318,7 +1323,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1330,6 +1335,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/sv.po b/po/sv.po
index 975cba0c7..672ac6bea 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,11 +781,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1020,7 +1025,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1284,7 +1289,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1318,7 +1323,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1330,6 +1335,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/tr.po b/po/tr.po
index 3f1eb8ae2..267e5b7fd 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,11 +781,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1020,7 +1025,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1284,7 +1289,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1318,7 +1323,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1330,6 +1335,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/zh.po b/po/zh.po
index d35b9bcf1..a767ef35e 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,11 +781,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1020,7 +1025,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1284,7 +1289,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1318,7 +1323,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1330,6 +1335,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index ce61c7c88..017650fa7 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 08:59+0000\n"
+"POT-Creation-Date: 2017-12-04 09:00+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -380,7 +380,7 @@ msgstr ""
 msgid "Creating the container"
 msgstr ""
 
-#: lxc/cluster.go:157
+#: lxc/cluster.go:183
 msgid "DATABASE"
 msgstr ""
 
@@ -533,7 +533,7 @@ msgstr ""
 msgid "Force pseudo-terminal allocation"
 msgstr ""
 
-#: lxc/cluster.go:37
+#: lxc/cluster.go:41
 msgid "Force removing a node, even if degraded"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:155 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -781,11 +781,16 @@ msgstr ""
 msgid "No fingerprint specified."
 msgstr ""
 
-#: lxc/cluster.go:116
+#: lxc/cluster.go:142
 #, c-format
 msgid "Node %s removed"
 msgstr ""
 
+#: lxc/cluster.go:118
+#, c-format
+msgid "Node %s renamed to %s"
+msgstr ""
+
 #: lxc/init.go:149
 msgid "Node name"
 msgstr ""
@@ -1020,7 +1025,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:158 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:469
 msgid "STATE"
 msgstr ""
 
@@ -1284,7 +1289,7 @@ msgstr ""
 msgid "UPLOAD DATE"
 msgstr ""
 
-#: lxc/cluster.go:156 lxc/remote.go:410
+#: lxc/cluster.go:182 lxc/remote.go:410
 msgid "URL"
 msgstr ""
 
@@ -1318,7 +1323,7 @@ msgstr ""
 msgid "Usage: lxc <command> [options]"
 msgstr ""
 
-#: lxc/cluster.go:21
+#: lxc/cluster.go:22
 msgid ""
 "Usage: lxc cluster <subcommand> [options]\n"
 "\n"
@@ -1330,6 +1335,9 @@ msgid ""
 "lxc cluster show [<remote>:]<node>\n"
 "    Show details of a node.\n"
 "\n"
+"lxc cluster rename [<remote>:]<node> <new-name>\n"
+"    Rename a cluster node.\n"
+"\n"
 "lxc cluster delete [<remote>:]<node> [--force]\n"
 "    Delete a node from the cluster."
 msgstr ""
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 5b653e869..1320a1ff4 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -37,7 +37,16 @@ type RaftNode struct {
 	Address string `json:"address" yaml:"address"`
 }
 
+// NodePost represents the fields required to rename a LXD node.
+//
+// API extension: clustering
+type NodePost struct {
+	Name string `json:"name" yaml:"name"`
+}
+
 // Node represents a LXD node in the cluster.
+//
+// API extension: clustering
 type Node struct {
 	Name     string `json:"name" yaml:"name"`
 	URL      string `json:"url" yaml:"url"`
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index a0dd6a27f..cdbec3671 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -82,8 +82,11 @@ test_clustering() {
   # notified.
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
+  # Rename a node, giving it a new name (node4 becomes node5).
+  LXD_DIR="${LXD_THREE_DIR}" lxc cluster rename node4 node5
+
   # Remove a node gracefully.
-  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster delete node4
+  LXD_DIR="${LXD_FOUR_DIR}" lxc cluster delete node5
 
   LXD_DIR="${LXD_FOUR_DIR}" lxd shutdown
   LXD_DIR="${LXD_THREE_DIR}" lxd shutdown

From 710c4e03334deee9610d4834284adcf6da29c734 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Wed, 22 Nov 2017 08:44:42 +0000
Subject: [PATCH 098/116] Make GET /1.0/containers fetch container status from
 cluster nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/containers_get.go     | 118 +++++++++++++++++++++++++++++++++++++++-------
 lxd/db/containers.go      |  45 ++++++++++++++++++
 lxd/db/containers_test.go |  48 +++++++++++++++++++
 lxd/db/node.go            |   6 ++-
 test/suites/clustering.sh |  25 +++++++++-
 5 files changed, 221 insertions(+), 21 deletions(-)
 create mode 100644 lxd/db/containers_test.go

diff --git a/lxd/containers_get.go b/lxd/containers_get.go
index 376190b86..425ef8590 100644
--- a/lxd/containers_get.go
+++ b/lxd/containers_get.go
@@ -3,19 +3,24 @@ package main
 import (
 	"fmt"
 	"net/http"
+	"sort"
+	"sync"
 	"time"
 
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/state"
 	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 	"github.com/lxc/lxd/shared/logger"
 	"github.com/lxc/lxd/shared/version"
+	"github.com/pkg/errors"
 )
 
 func containersGet(d *Daemon, r *http.Request) Response {
 	for i := 0; i < 100; i++ {
-		result, err := doContainersGet(d.State(), util.IsRecursionRequest(r))
+		result, err := doContainersGet(d, r)
 		if err == nil {
 			return SyncResponse(true, result)
 		}
@@ -33,38 +38,80 @@ func containersGet(d *Daemon, r *http.Request) Response {
 	return InternalError(fmt.Errorf("DB is locked"))
 }
 
-func doContainersGet(s *state.State, recursion bool) (interface{}, error) {
-	result, err := s.Cluster.ContainersList(db.CTypeRegular)
+func doContainersGet(d *Daemon, r *http.Request) (interface{}, error) {
+	var result map[string][]string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		result, err = tx.ContainersListByNodeAddress()
+		return err
+	})
 	if err != nil {
-		return nil, err
+		return []string{}, err
 	}
 
+	recursion := util.IsRecursionRequest(r)
 	resultString := []string{}
 	resultList := []*api.Container{}
-	if err != nil {
-		return []string{}, err
+	resultMu := sync.Mutex{}
+
+	resultAppend := func(name string, c *api.Container, err error) {
+		if err != nil {
+			c = &api.Container{
+				Name:       name,
+				Status:     api.Error.String(),
+				StatusCode: api.Error}
+		}
+		resultMu.Lock()
+		resultList = append(resultList, c)
+		resultMu.Unlock()
 	}
 
-	for _, container := range result {
-		if !recursion {
-			url := fmt.Sprintf("/%s/containers/%s", version.APIVersion, container)
-			resultString = append(resultString, url)
-		} else {
-			c, err := doContainerGet(s, container)
-			if err != nil {
-				c = &api.Container{
-					Name:       container,
-					Status:     api.Error.String(),
-					StatusCode: api.Error}
+	wg := sync.WaitGroup{}
+	for address, containers := range result {
+		// Mark containers on unavailable nodes as down
+		if recursion && address == "0.0.0.0" {
+			for _, container := range containers {
+				resultAppend(container, nil, fmt.Errorf("unavailable"))
 			}
-			resultList = append(resultList, c)
+		}
+
+		// For recursion requests we need to fetch the state of remote
+		// containers from their respective nodes.
+		if recursion && address != "" && !isClusterNotification(r) {
+			wg.Add(1)
+			go func(address string) {
+				cert := d.endpoints.NetworkCert()
+				cs, err := doContainersGetFromNode(address, cert)
+				for _, c := range cs {
+					resultAppend(c.Name, &c, err)
+				}
+				wg.Done()
+			}(address)
+			continue
+		}
+
+		for _, container := range containers {
+			if !recursion {
+				url := fmt.Sprintf("/%s/containers/%s", version.APIVersion, container)
+				resultString = append(resultString, url)
+				continue
+			}
+
+			c, err := doContainerGet(d.State(), container)
+			resultAppend(container, c, err)
 		}
 	}
+	wg.Wait()
 
 	if !recursion {
 		return resultString, nil
 	}
 
+	// Sort the result list by name.
+	sort.Slice(resultList, func(i, j int) bool {
+		return resultList[i].Name < resultList[j].Name
+	})
+
 	return resultList, nil
 }
 
@@ -81,3 +128,38 @@ func doContainerGet(s *state.State, cname string) (*api.Container, error) {
 
 	return cts.(*api.Container), nil
 }
+
+// Fetch information about the containers on the given remote node, using the
+// rest API and with a timeout of 30 seconds.
+func doContainersGetFromNode(node string, cert *shared.CertInfo) ([]api.Container, error) {
+	f := func() ([]api.Container, error) {
+		client, err := cluster.Connect(node, cert, true)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to connect to node %s", node)
+		}
+		containers, err := client.GetContainers()
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get containers from node %s", node)
+		}
+		return containers, nil
+	}
+
+	timeout := time.After(30 * time.Second)
+	done := make(chan struct{})
+
+	var containers []api.Container
+	var err error
+
+	go func() {
+		containers, err = f()
+		done <- struct{}{}
+	}()
+
+	select {
+	case <-timeout:
+		err = fmt.Errorf("timeout getting containers from node %s", node)
+	case <-done:
+	}
+
+	return containers, err
+}
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 994b187d3..a2ddcbc58 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -84,6 +84,51 @@ SELECT nodes.id, nodes.address
 	return address, nil
 }
 
+// ContainersListByNodeAddress returns the names of all containers grouped by
+// cluster node address.
+//
+// The node address of containers running on the local node is set to the empty
+// string, to distinguish it from remote nodes.
+//
+// Containers whose node is down are added to the special address "0.0.0.0".
+func (c *ClusterTx) ContainersListByNodeAddress() (map[string][]string, error) {
+	stmt := `
+SELECT containers.name, nodes.id, nodes.address, nodes.heartbeat
+  FROM containers JOIN nodes ON nodes.id = containers.node_id
+  WHERE containers.type=?
+  ORDER BY containers.id
+`
+	rows, err := c.tx.Query(stmt, CTypeRegular)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	result := map[string][]string{}
+
+	for i := 0; rows.Next(); i++ {
+		var name, nodeAddress string
+		var nodeID int64
+		var nodeHeartbeat time.Time
+		err := rows.Scan(&name, &nodeID, &nodeAddress, &nodeHeartbeat)
+		if err != nil {
+			return nil, err
+		}
+		if nodeID == c.nodeID {
+			nodeAddress = ""
+		} else if nodeIsDown(nodeHeartbeat) {
+			nodeAddress = "0.0.0.0"
+		}
+		result[nodeAddress] = append(result[nodeAddress], name)
+	}
+
+	err = rows.Err()
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
 func (c *Cluster) ContainerRemove(name string) error {
 	id, err := c.ContainerId(name)
 	if err != nil {
diff --git a/lxd/db/containers_test.go b/lxd/db/containers_test.go
new file mode 100644
index 000000000..a5a68c444
--- /dev/null
+++ b/lxd/db/containers_test.go
@@ -0,0 +1,48 @@
+package db_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Containers are grouped by node address.
+func TestContainersListByNodeAddress(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	nodeID1 := int64(1) // This is the default local node
+
+	nodeID2, err := tx.NodeAdd("node2", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	nodeID3, err := tx.NodeAdd("node3", "5.6.7.8:666")
+	require.NoError(t, err)
+	require.NoError(t, tx.NodeHeartbeat("5.6.7.8:666", time.Now().Add(-time.Minute)))
+
+	addContainer(t, tx, nodeID2, "c1")
+	addContainer(t, tx, nodeID1, "c2")
+	addContainer(t, tx, nodeID3, "c3")
+	addContainer(t, tx, nodeID2, "c4")
+
+	result, err := tx.ContainersListByNodeAddress()
+	require.NoError(t, err)
+	assert.Equal(
+		t,
+		map[string][]string{
+			"":            {"c2"},
+			"1.2.3.4:666": {"c1", "c4"},
+			"0.0.0.0":     {"c3"},
+		}, result)
+}
+
+func addContainer(t *testing.T, tx *db.ClusterTx, nodeID int64, name string) {
+	stmt := `
+INSERT INTO containers(node_id, name, architecture, type) VALUES (?, ?, 1, ?)
+`
+	_, err := tx.Tx().Exec(stmt, nodeID, name, db.CTypeRegular)
+	require.NoError(t, err)
+}
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 01d151f5d..2982bd884 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -24,7 +24,7 @@ type NodeInfo struct {
 // IsDown returns true if the last heartbeat time of the node is older than 20
 // seconds.
 func (n NodeInfo) IsDown() bool {
-	return n.Heartbeat.Before(time.Now().Add(-20 * time.Second))
+	return nodeIsDown(n.Heartbeat)
 }
 
 // NodeByAddress returns the node with the given network address.
@@ -216,3 +216,7 @@ func (c *ClusterTx) NodeClear(id int64) error {
 
 	return nil
 }
+
+func nodeIsDown(heartbeat time.Time) bool {
+	return heartbeat.Before(time.Now().Add(-20 * time.Second))
+}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index cdbec3671..bab1576d2 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -121,21 +121,42 @@ test_clustering_containers() {
   ns2="${prefix}2"
   spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
 
-  # Init a container on a node2, using a client connected to node1
+  # Spawn a third node
+  setup_clustering_netns 3
+  LXD_THREE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_THREE_DIR}"
+  ns3="${prefix}3"
+  spawn_lxd_and_join_cluster "${ns3}" "${bridge}" "${cert}" 3 1 "${LXD_THREE_DIR}"
+
+  # Init a container on node2, using a client connected to node1
   LXD_DIR="${LXD_TWO_DIR}" ensure_import_testimage
   LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 testimage foo
-  LXD_DIR="${LXD_TWO_DIR}" lxc list | grep -q foo
+
+  # The container is visible through both nodes
+  LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q STOPPED
+  LXD_DIR="${LXD_TWO_DIR}" lxc list | grep foo | grep -q STOPPED
+
+  # A Node: field indicates on which node the container is running
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
+  # Start and stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc start foo
   LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"
+  LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q RUNNING
   LXD_DIR="${LXD_ONE_DIR}" lxc stop foo
 
   LXD_DIR="${LXD_TWO_DIR}" lxc network delete "${bridge}"
 
+  # Shut down node 2, wait for it to be considered offline, and list
+  # containers.
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  sleep 22
+  LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q ERROR
+
+  LXD_DIR="${LXD_THREE_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2
+  rm -f "${LXD_THREE_DIR}/unix.socket"
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
 }

From 52dd4665f6c6c7edf182c1f0c8f3418cb7777b1a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 23 Nov 2017 07:30:21 +0000
Subject: [PATCH 099/116] Add Clustered field to api.ServerEnvironment

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go           |  1 +
 client/lxd_server.go           |  5 +++++
 lxd/api_1.0.go                 | 16 +++++++++++++++-
 lxd/api_cluster_test.go        |  4 ++++
 lxd/daemon_integration_test.go |  2 ++
 shared/api/server.go           |  3 +++
 6 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 9ef0030a0..7a56436bf 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -47,6 +47,7 @@ type ContainerServer interface {
 	UpdateServer(server api.ServerPut, ETag string) (err error)
 	HasExtension(extension string) (exists bool)
 	RequireAuthenticated(authenticated bool)
+	IsClustered() (clustered bool)
 	ClusterTargetNode(name string) ContainerServer
 
 	// Certificate functions
diff --git a/client/lxd_server.go b/client/lxd_server.go
index 84b401deb..1c4ef5959 100644
--- a/client/lxd_server.go
+++ b/client/lxd_server.go
@@ -61,6 +61,11 @@ func (r *ProtocolLXD) HasExtension(extension string) bool {
 	return false
 }
 
+// IsClustered returns true if the server is part of a LXD cluster.
+func (r *ProtocolLXD) IsClustered() bool {
+	return r.server.Environment.Clustered
+}
+
 // GetServerResources returns the resources available to a given LXD server
 func (r *ProtocolLXD) GetServerResources() (*api.Resources, error) {
 	if !r.HasExtension("resources") {
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 9111682f7..5209a5cb7 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -109,6 +109,19 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
+	clustered := false
+	err = d.db.Transaction(func(tx *db.NodeTx) error {
+		addresses, err := tx.RaftNodeAddresses()
+		if err != nil {
+			return err
+		}
+		clustered = len(addresses) > 0
+		return nil
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
 	certificate := string(d.endpoints.NetworkPublicKey())
 	var certificateFingerprint string
 	if certificate != "" {
@@ -140,7 +153,8 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		KernelVersion:          uname.Release,
 		Server:                 "lxd",
 		ServerPid:              os.Getpid(),
-		ServerVersion:          version.Version}
+		ServerVersion:          version.Version,
+		Clustered:              clustered}
 
 	drivers := readStoragePoolDriversCache()
 	for _, driver := range drivers {
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index c77cc6ca7..e4f187478 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -26,6 +26,10 @@ func TestCluster_Bootstrap(t *testing.T) {
 	op, err := client.BootstrapCluster("buzz")
 	require.NoError(t, err)
 	require.NoError(t, op.Wait())
+
+	_, _, err = client.GetServer()
+	require.NoError(t, err)
+	assert.True(t, client.IsClustered())
 }
 
 // A LXD node which is already configured for networking can join an existing
diff --git a/lxd/daemon_integration_test.go b/lxd/daemon_integration_test.go
index 2012dc657..0cdf6a06d 100644
--- a/lxd/daemon_integration_test.go
+++ b/lxd/daemon_integration_test.go
@@ -20,6 +20,8 @@ func TestIntegration_UnixSocket(t *testing.T) {
 	server, _, err := client.GetServer()
 	require.NoError(t, err)
 	assert.Equal(t, "trusted", server.Auth)
+	assert.False(t, server.Environment.Clustered)
+	assert.False(t, client.IsClustered())
 }
 
 // Create a new daemon for testing.
diff --git a/shared/api/server.go b/shared/api/server.go
index c89426d59..570041533 100644
--- a/shared/api/server.go
+++ b/shared/api/server.go
@@ -16,6 +16,9 @@ type ServerEnvironment struct {
 	ServerVersion          string   `json:"server_version" yaml:"server_version"`
 	Storage                string   `json:"storage" yaml:"storage"`
 	StorageVersion         string   `json:"storage_version" yaml:"storage_version"`
+
+	// API extension: clustering
+	Clustered bool `json:"clustered" yaml:"clustered"`
 }
 
 // ServerPut represents the modifiable fields of a LXD server configuration

From be5c146ca9e0e529feeb40b9febc7ee2e9f5d1bc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 23 Nov 2017 08:10:53 +0000
Subject: [PATCH 100/116] Add NODE column to 'lxc list' when clustered

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/list.go               | 30 +++++++++++++++++++++++++-----
 po/de.po                  | 42 ++++++++++++++++++++++++------------------
 po/el.po                  | 42 ++++++++++++++++++++++++------------------
 po/fr.po                  | 42 ++++++++++++++++++++++++------------------
 po/id.po                  | 42 ++++++++++++++++++++++++------------------
 po/it.po                  | 42 ++++++++++++++++++++++++------------------
 po/ja.po                  | 43 +++++++++++++++++++++++++------------------
 po/lxd.pot                | 42 ++++++++++++++++++++++++------------------
 po/nb_NO.po               | 42 ++++++++++++++++++++++++------------------
 po/nl.po                  | 42 ++++++++++++++++++++++++------------------
 po/pt_BR.po               | 42 ++++++++++++++++++++++++------------------
 po/ru.po                  | 42 ++++++++++++++++++++++++------------------
 po/sr.po                  | 42 ++++++++++++++++++++++++------------------
 po/sv.po                  | 42 ++++++++++++++++++++++++------------------
 po/tr.po                  | 42 ++++++++++++++++++++++++------------------
 po/zh.po                  | 42 ++++++++++++++++++++++++------------------
 po/zh_Hans.po             | 42 ++++++++++++++++++++++++------------------
 test/main.sh              |  3 ++-
 test/suites/clustering.sh |  3 ++-
 19 files changed, 414 insertions(+), 295 deletions(-)

diff --git a/lxc/list.go b/lxc/list.go
index 9599e61e1..6e1dedf2d 100644
--- a/lxc/list.go
+++ b/lxc/list.go
@@ -103,6 +103,8 @@ Pre-defined column shorthand chars:
 
 	t - Type (persistent or ephemeral)
 
+	N - Node hosting the container
+
 Custom columns are defined with "key[:name][:maxWidth]":
 
 	KEY: The (extended) config key to display
@@ -124,9 +126,11 @@ lxc list -c ns,user.comment:comment
 	List images with their running state and user comment. `)
 }
 
+const defaultColumns = "ns46tSN"
+
 func (c *listCmd) flags() {
-	gnuflag.StringVar(&c.columnsRaw, "c", "ns46tS", i18n.G("Columns"))
-	gnuflag.StringVar(&c.columnsRaw, "columns", "ns46tS", i18n.G("Columns"))
+	gnuflag.StringVar(&c.columnsRaw, "c", defaultColumns, i18n.G("Columns"))
+	gnuflag.StringVar(&c.columnsRaw, "columns", defaultColumns, i18n.G("Columns"))
 	gnuflag.StringVar(&c.format, "format", "table", i18n.G("Format (csv|json|table|yaml)"))
 	gnuflag.BoolVar(&c.fast, "fast", false, i18n.G("Fast mode (same as --columns=nsacPt)"))
 }
@@ -446,7 +450,7 @@ func (c *listCmd) run(conf *config.Config, args []string) error {
 		cts = append(cts, cinfo)
 	}
 
-	columns, err := c.parseColumns()
+	columns, err := c.parseColumns(d.IsClustered())
 	if err != nil {
 		return err
 	}
@@ -454,7 +458,7 @@ func (c *listCmd) run(conf *config.Config, args []string) error {
 	return c.listContainers(conf, remote, cts, filters, columns)
 }
 
-func (c *listCmd) parseColumns() ([]column, error) {
+func (c *listCmd) parseColumns(clustered bool) ([]column, error) {
 	columnsShorthandMap := map[rune]column{
 		'4': {i18n.G("IPV4"), c.IP4ColumnData, true, false},
 		'6': {i18n.G("IPV6"), c.IP6ColumnData, true, false},
@@ -472,7 +476,7 @@ func (c *listCmd) parseColumns() ([]column, error) {
 	}
 
 	if c.fast {
-		if c.columnsRaw != "ns46tS" {
+		if c.columnsRaw != defaultColumns {
 			// --columns was specified too
 			return nil, fmt.Errorf("Can't specify --fast with --columns")
 		} else {
@@ -480,6 +484,18 @@ func (c *listCmd) parseColumns() ([]column, error) {
 		}
 	}
 
+	if clustered {
+		columnsShorthandMap['N'] = column{
+			i18n.G("NODE"), c.nodeColumnData, false, false}
+	} else {
+		if c.columnsRaw != defaultColumns {
+			if strings.ContainsAny(c.columnsRaw, "N") {
+				return nil, fmt.Errorf("Can't specify column N when not clustered")
+			}
+		}
+		c.columnsRaw = strings.Replace(c.columnsRaw, "N", "", -1)
+	}
+
 	columnList := strings.Split(c.columnsRaw, ",")
 
 	columns := []column{}
@@ -675,3 +691,7 @@ func (c *listCmd) LastUsedColumnData(cInfo api.Container, cState *api.ContainerS
 
 	return ""
 }
+
+func (c *listCmd) nodeColumnData(cInfo api.Container, cState *api.ContainerState, cSnaps []api.ContainerSnapshot) string {
+	return cInfo.Node
+}
diff --git a/po/de.po b/po/de.po
index 382df103f..1cf22ea85 100644
--- a/po/de.po
+++ b/po/de.po
@@ -291,7 +291,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -367,7 +367,7 @@ msgstr " Prozessorauslastung:"
 msgid "CREATED"
 msgstr "ERSTELLT AM"
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr "ERSTELLT AM"
 
@@ -416,7 +416,7 @@ msgstr "Fingerabdruck des Zertifikats: % x\n"
 msgid "Client certificate stored at server: "
 msgstr "Gespeichertes Nutzerzertifikat auf dem Server: "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -502,7 +502,7 @@ msgstr "kann nicht zum selben Container Namen kopieren"
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -547,7 +547,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr " Prozessorauslastung:"
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -638,7 +638,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -675,7 +675,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -688,11 +688,11 @@ msgstr "Generiere Nutzerzertifikat. Dies kann wenige Minuten dauern...\n"
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -784,7 +784,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -857,7 +857,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "der Name des Ursprung Containers muss angegeben werden"
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -866,6 +866,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -966,15 +970,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1162,7 +1166,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1170,7 +1174,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1182,7 +1186,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1329,7 +1333,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -2000,6 +2004,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/el.po b/po/el.po
index 9691fad16..802e492f1 100644
--- a/po/el.po
+++ b/po/el.po
@@ -186,7 +186,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -260,7 +260,7 @@ msgstr "  Χρήση CPU:"
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -305,7 +305,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -388,7 +388,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -433,7 +433,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr "  Χρήση CPU:"
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -521,7 +521,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -554,7 +554,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -566,11 +566,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -660,7 +660,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -730,7 +730,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -739,6 +739,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -836,15 +840,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1024,7 +1028,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1032,7 +1036,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1044,7 +1048,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1185,7 +1189,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1764,6 +1768,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/fr.po b/po/fr.po
index 114db0c9a..53920289c 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -282,7 +282,7 @@ msgstr "ALIAS"
 msgid "ARCH"
 msgstr "ARCH"
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr "ARCHITECTURE"
 
@@ -357,7 +357,7 @@ msgstr "CPU utilisé :"
 msgid "CREATED"
 msgstr "CRÉÉ À"
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr "CRÉÉ À"
 
@@ -405,7 +405,7 @@ msgstr "Empreinte du certificat : %x"
 msgid "Client certificate stored at server: "
 msgstr "Certificat client enregistré sur le serveur : "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr "Colonnes"
 
@@ -489,7 +489,7 @@ msgstr "Création du conteneur"
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr "DESCRIPTION"
@@ -534,7 +534,7 @@ msgstr "Désactiver stdin (lecture à partir de /dev/null)"
 msgid "Disk usage:"
 msgstr "  Disque utilisé :"
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr "ÉPHÉMÈRE"
 
@@ -624,7 +624,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 #, fuzzy
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr "Mode rapide (identique à --columns=nsacPt"
@@ -659,7 +659,7 @@ msgstr "Forcer la suppression des conteneurs arrêtés"
 msgid "Force using the local unix socket"
 msgstr "Forcer l'utilisation de la socket unix locale"
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -672,11 +672,11 @@ msgstr "Génération d'un certificat client. Ceci peut prendre une minute…"
 msgid "ID"
 msgstr "PID"
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr "IPv4"
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr "IPv6"
 
@@ -771,7 +771,7 @@ msgstr "IPs :"
 msgid "Keep the image up to date after initial copy"
 msgstr "Garder l'image à jour après la copie initiale"
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr "DERNIÈRE UTILISATION À"
 
@@ -843,7 +843,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr "Vous devez fournir le nom d'un conteneur pour : "
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr "NOM"
@@ -852,6 +852,10 @@ msgstr "NOM"
 msgid "NO"
 msgstr "NON"
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -951,15 +955,15 @@ msgstr "Options :"
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr "Surcharger le mode terminal (auto, interactif ou non-interactif)"
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr "PERSISTANT"
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr "PID"
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr "PROFILS"
 
@@ -1143,7 +1147,7 @@ msgstr "Récupération de l'image : %s"
 msgid "SIZE"
 msgstr "TAILLE"
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr "INSTANTANÉS"
 
@@ -1151,7 +1155,7 @@ msgstr "INSTANTANÉS"
 msgid "SOURCE"
 msgstr "SOURCE"
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr "ÉTAT"
 
@@ -1164,7 +1168,7 @@ msgstr "STATIQUE"
 msgid "STATUS"
 msgstr "ÉTAT"
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr "ENSEMBLE DE STOCKAGE"
 
@@ -1309,7 +1313,7 @@ msgstr "Swap (courant)"
 msgid "Swap (peak)"
 msgstr "Swap (pointe)"
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr "TYPE"
 
@@ -2122,6 +2126,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/id.po b/po/id.po
index 99ffcd30c..775f49de0 100644
--- a/po/id.po
+++ b/po/id.po
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -384,7 +384,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -516,7 +516,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -549,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -561,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -655,7 +655,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -733,6 +733,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -829,15 +833,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1017,7 +1021,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1025,7 +1029,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1037,7 +1041,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1178,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1757,6 +1761,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/it.po b/po/it.po
index ee90596fc..86c034965 100644
--- a/po/it.po
+++ b/po/it.po
@@ -207,7 +207,7 @@ msgstr "ALIAS"
 msgid "ARCH"
 msgstr "ARCH"
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr "ARCHITETTURA"
 
@@ -281,7 +281,7 @@ msgstr "Utilizzo CPU:"
 msgid "CREATED"
 msgstr "CREATO IL"
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr "CREATO IL"
 
@@ -326,7 +326,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr "Certificato del client salvato dal server: "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr "Colonne"
 
@@ -409,7 +409,7 @@ msgstr "Creazione del container in corso"
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr "DESCRIZIONE"
@@ -453,7 +453,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr "Utilizzo disco:"
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -541,7 +541,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -575,7 +575,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -587,11 +587,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -681,7 +681,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -750,7 +750,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -759,6 +759,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -855,15 +859,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1043,7 +1047,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1051,7 +1055,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1063,7 +1067,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1204,7 +1208,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1783,6 +1787,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/ja.po b/po/ja.po
index 8a1f9fea3..dc5b4740f 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -187,7 +187,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -260,7 +260,7 @@ msgstr "CPU使用量:"
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -307,7 +307,7 @@ msgstr "証明書のフィンガープリント: %s"
 msgid "Client certificate stored at server: "
 msgstr "クライアント証明書がサーバに格納されました: "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr "カラムレイアウト"
 
@@ -390,7 +390,7 @@ msgstr "コンテナを作成中"
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -434,7 +434,7 @@ msgstr "標準入力を無効にします (/dev/null から読み込みます)"
 msgid "Disk usage:"
 msgstr "ディスク使用量:"
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -522,7 +522,7 @@ msgstr "エイリアス %s の削除に失敗しました"
 msgid "Failed to walk path for %s: %s"
 msgstr "パス %s にアクセスできませんでした: %s"
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 #, fuzzy
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr "Fast モード (--columns=nsacPt と同じ)"
@@ -557,7 +557,7 @@ msgstr "稼働中のコンテナを強制的に削除します"
 msgid "Force using the local unix socket"
 msgstr "強制的にローカルのUNIXソケットを使います"
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr "フォーマット (csv|json|table|yaml)"
 
@@ -570,11 +570,11 @@ msgstr "クライアント証明書を生成します。1分ぐらいかかり
 msgid "ID"
 msgstr "PID"
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr "IPV4"
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr "IPV6"
 
@@ -664,7 +664,7 @@ msgstr "IPアドレス:"
 msgid "Keep the image up to date after initial copy"
 msgstr "最初にコピーした後も常にイメージを最新の状態に保つ"
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -735,7 +735,7 @@ msgstr "ディレクトリからのインポートは root で実行する必要
 msgid "Must supply container name for: "
 msgstr "コンテナ名を指定する必要があります: "
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -744,6 +744,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -841,15 +845,15 @@ msgstr "オプション:"
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr "ターミナルモードを上書きします (auto, interactive, non-interactive)"
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr "PID"
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1030,7 +1034,7 @@ msgstr "イメージの取得中: %s"
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1038,7 +1042,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1050,7 +1054,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1192,7 +1196,7 @@ msgstr "Swap (現在値)"
 msgid "Swap (peak)"
 msgstr "Swap (ピーク)"
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -2008,6 +2012,7 @@ msgstr ""
 "    lxc launch ubuntu:16.04 u1"
 
 #: lxc/list.go:44
+#, fuzzy
 msgid ""
 "Usage: lxc list [<remote>:] [filters] [--format csv|json|table|yaml] [-c "
 "<columns>] [--fast]\n"
@@ -2075,6 +2080,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/lxd.pot b/po/lxd.pot
index 49ddc1810..35717c06f 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -176,7 +176,7 @@ msgstr  ""
 msgid   "ARCH"
 msgstr  ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid   "ARCHITECTURE"
 msgstr  ""
 
@@ -249,7 +249,7 @@ msgstr  ""
 msgid   "CREATED"
 msgstr  ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid   "CREATED AT"
 msgstr  ""
 
@@ -294,7 +294,7 @@ msgstr  ""
 msgid   "Client certificate stored at server: "
 msgstr  ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid   "Columns"
 msgstr  ""
 
@@ -376,7 +376,7 @@ msgstr  ""
 msgid   "DATABASE"
 msgstr  ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525 lxc/storage.go:682 lxc/storage.go:793
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525 lxc/storage.go:682 lxc/storage.go:793
 msgid   "DESCRIPTION"
 msgstr  ""
 
@@ -419,7 +419,7 @@ msgstr  ""
 msgid   "Disk usage:"
 msgstr  ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid   "EPHEMERAL"
 msgstr  ""
 
@@ -507,7 +507,7 @@ msgstr  ""
 msgid   "Failed to walk path for %s: %s"
 msgstr  ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid   "Fast mode (same as --columns=nsacPt)"
 msgstr  ""
 
@@ -540,7 +540,7 @@ msgstr  ""
 msgid   "Force using the local unix socket"
 msgstr  ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid   "Format (csv|json|table|yaml)"
 msgstr  ""
 
@@ -552,11 +552,11 @@ msgstr  ""
 msgid   "ID"
 msgstr  ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid   "IPV4"
 msgstr  ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid   "IPV6"
 msgstr  ""
 
@@ -646,7 +646,7 @@ msgstr  ""
 msgid   "Keep the image up to date after initial copy"
 msgstr  ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid   "LAST USED AT"
 msgstr  ""
 
@@ -715,7 +715,7 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid   "NAME"
 msgstr  ""
 
@@ -723,6 +723,10 @@ msgstr  ""
 msgid   "NO"
 msgstr  ""
 
+#: lxc/list.go:489
+msgid   "NODE"
+msgstr  ""
+
 #: lxc/info.go:117
 #, c-format
 msgid   "Name: %s"
@@ -819,15 +823,15 @@ msgstr  ""
 msgid   "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr  ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid   "PERSISTENT"
 msgstr  ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid   "PID"
 msgstr  ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid   "PROFILES"
 msgstr  ""
 
@@ -1006,7 +1010,7 @@ msgstr  ""
 msgid   "SIZE"
 msgstr  ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid   "SNAPSHOTS"
 msgstr  ""
 
@@ -1014,7 +1018,7 @@ msgstr  ""
 msgid   "SOURCE"
 msgstr  ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid   "STATE"
 msgstr  ""
 
@@ -1026,7 +1030,7 @@ msgstr  ""
 msgid   "STATUS"
 msgstr  ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid   "STORAGE POOL"
 msgstr  ""
 
@@ -1167,7 +1171,7 @@ msgstr  ""
 msgid   "Swap (peak)"
 msgstr  ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid   "TYPE"
 msgstr  ""
 
@@ -1693,6 +1697,8 @@ msgid   "Usage: lxc list [<remote>:] [filters] [--format csv|json|table|yaml] [-
         "\n"
         "	t - Type (persistent or ephemeral)\n"
         "\n"
+        "	N - Node hosting the container\n"
+        "\n"
         "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
         "\n"
         "	KEY: The (extended) config key to display\n"
diff --git a/po/nb_NO.po b/po/nb_NO.po
index 391cc26b5..ef5376b0e 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -384,7 +384,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -516,7 +516,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -549,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -561,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -655,7 +655,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -733,6 +733,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -829,15 +833,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1017,7 +1021,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1025,7 +1029,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1037,7 +1041,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1178,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1757,6 +1761,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/nl.po b/po/nl.po
index f8c257ac2..06673fd9a 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -384,7 +384,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -516,7 +516,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -549,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -561,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -655,7 +655,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -733,6 +733,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -829,15 +833,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1017,7 +1021,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1025,7 +1029,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1037,7 +1041,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1178,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1757,6 +1761,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/pt_BR.po b/po/pt_BR.po
index 2938e6088..82f511ab5 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -384,7 +384,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -516,7 +516,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -549,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -561,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -655,7 +655,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -733,6 +733,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -829,15 +833,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1017,7 +1021,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1025,7 +1029,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1037,7 +1041,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1178,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1757,6 +1761,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/ru.po b/po/ru.po
index 4cbba4cc1..9210abcba 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -269,7 +269,7 @@ msgstr "ПСЕВДОНИМ"
 msgid "ARCH"
 msgstr "ARCH"
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr "АРХИТЕКТУРА"
 
@@ -344,7 +344,7 @@ msgstr " Использование ЦП:"
 msgid "CREATED"
 msgstr "СОЗДАН"
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr "СОЗДАН"
 
@@ -389,7 +389,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr "Сертификат клиента хранится на сервере: "
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr "Столбцы"
 
@@ -472,7 +472,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -517,7 +517,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr " Использование диска:"
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -605,7 +605,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -638,7 +638,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -650,11 +650,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -744,7 +744,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -814,7 +814,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -823,6 +823,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -920,15 +924,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1108,7 +1112,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1116,7 +1120,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1128,7 +1132,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1269,7 +1273,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1856,6 +1860,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/sr.po b/po/sr.po
index fad733349..bb124ee16 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -384,7 +384,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -516,7 +516,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -549,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -561,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -655,7 +655,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -733,6 +733,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -829,15 +833,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1017,7 +1021,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1025,7 +1029,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1037,7 +1041,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1178,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1757,6 +1761,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/sv.po b/po/sv.po
index 672ac6bea..81b4289d2 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -384,7 +384,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -516,7 +516,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -549,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -561,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -655,7 +655,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -733,6 +733,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -829,15 +833,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1017,7 +1021,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1025,7 +1029,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1037,7 +1041,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1178,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1757,6 +1761,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/tr.po b/po/tr.po
index 267e5b7fd..dda48eabc 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -384,7 +384,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -516,7 +516,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -549,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -561,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -655,7 +655,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -733,6 +733,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -829,15 +833,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1017,7 +1021,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1025,7 +1029,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1037,7 +1041,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1178,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1757,6 +1761,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/zh.po b/po/zh.po
index a767ef35e..19c6b27f8 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -384,7 +384,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -516,7 +516,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -549,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -561,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -655,7 +655,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -733,6 +733,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -829,15 +833,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1017,7 +1021,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1025,7 +1029,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1037,7 +1041,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1178,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1757,6 +1761,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index 017650fa7..86f400bb0 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -183,7 +183,7 @@ msgstr ""
 msgid "ARCH"
 msgstr ""
 
-#: lxc/list.go:461
+#: lxc/list.go:465
 msgid "ARCHITECTURE"
 msgstr ""
 
@@ -256,7 +256,7 @@ msgstr ""
 msgid "CREATED"
 msgstr ""
 
-#: lxc/list.go:462
+#: lxc/list.go:466
 msgid "CREATED AT"
 msgstr ""
 
@@ -301,7 +301,7 @@ msgstr ""
 msgid "Client certificate stored at server: "
 msgstr ""
 
-#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:128 lxc/list.go:129
+#: lxc/image.go:174 lxc/image.go:175 lxc/list.go:132 lxc/list.go:133
 msgid "Columns"
 msgstr ""
 
@@ -384,7 +384,7 @@ msgstr ""
 msgid "DATABASE"
 msgstr ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:463 lxc/network.go:525
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
 #: lxc/storage.go:682 lxc/storage.go:793
 msgid "DESCRIPTION"
 msgstr ""
@@ -428,7 +428,7 @@ msgstr ""
 msgid "Disk usage:"
 msgstr ""
 
-#: lxc/list.go:619
+#: lxc/list.go:635
 msgid "EPHEMERAL"
 msgstr ""
 
@@ -516,7 +516,7 @@ msgstr ""
 msgid "Failed to walk path for %s: %s"
 msgstr ""
 
-#: lxc/list.go:131
+#: lxc/list.go:135
 msgid "Fast mode (same as --columns=nsacPt)"
 msgstr ""
 
@@ -549,7 +549,7 @@ msgstr ""
 msgid "Force using the local unix socket"
 msgstr ""
 
-#: lxc/image.go:180 lxc/list.go:130
+#: lxc/image.go:180 lxc/list.go:134
 msgid "Format (csv|json|table|yaml)"
 msgstr ""
 
@@ -561,11 +561,11 @@ msgstr ""
 msgid "ID"
 msgstr ""
 
-#: lxc/list.go:459
+#: lxc/list.go:463
 msgid "IPV4"
 msgstr ""
 
-#: lxc/list.go:460
+#: lxc/list.go:464
 msgid "IPV6"
 msgstr ""
 
@@ -655,7 +655,7 @@ msgstr ""
 msgid "Keep the image up to date after initial copy"
 msgstr ""
 
-#: lxc/list.go:464
+#: lxc/list.go:468
 msgid "LAST USED AT"
 msgstr ""
 
@@ -724,7 +724,7 @@ msgstr ""
 msgid "Must supply container name for: "
 msgstr ""
 
-#: lxc/cluster.go:181 lxc/list.go:465 lxc/network.go:522 lxc/profile.go:573
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
 #: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
 msgid "NAME"
 msgstr ""
@@ -733,6 +733,10 @@ msgstr ""
 msgid "NO"
 msgstr ""
 
+#: lxc/list.go:489
+msgid "NODE"
+msgstr ""
+
 #: lxc/info.go:117
 #, c-format
 msgid "Name: %s"
@@ -829,15 +833,15 @@ msgstr ""
 msgid "Override the terminal mode (auto, interactive or non-interactive)"
 msgstr ""
 
-#: lxc/list.go:621
+#: lxc/list.go:637
 msgid "PERSISTENT"
 msgstr ""
 
-#: lxc/list.go:466
+#: lxc/list.go:470
 msgid "PID"
 msgstr ""
 
-#: lxc/list.go:467
+#: lxc/list.go:471
 msgid "PROFILES"
 msgstr ""
 
@@ -1017,7 +1021,7 @@ msgstr ""
 msgid "SIZE"
 msgstr ""
 
-#: lxc/list.go:468
+#: lxc/list.go:472
 msgid "SNAPSHOTS"
 msgstr ""
 
@@ -1025,7 +1029,7 @@ msgstr ""
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:469
+#: lxc/cluster.go:184 lxc/list.go:473
 msgid "STATE"
 msgstr ""
 
@@ -1037,7 +1041,7 @@ msgstr ""
 msgid "STATUS"
 msgstr ""
 
-#: lxc/list.go:471
+#: lxc/list.go:475
 msgid "STORAGE POOL"
 msgstr ""
 
@@ -1178,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:470 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
 msgid "TYPE"
 msgstr ""
 
@@ -1757,6 +1761,8 @@ msgid ""
 "\n"
 "\tt - Type (persistent or ephemeral)\n"
 "\n"
+"\tN - Node hosting the container\n"
+"\n"
 "Custom columns are defined with \"key[:name][:maxWidth]\":\n"
 "\n"
 "\tKEY: The (extended) config key to display\n"
diff --git a/test/main.sh b/test/main.sh
index 0fe316193..748a40502 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -193,7 +193,8 @@ run_test test_resources "resources"
 run_test test_kernel_limits "kernel limits"
 run_test test_macaroon_auth "macaroon authentication"
 run_test test_console "console"
-run_test test_clustering "clustering"
+run_test test_clustering_membership "clustering"
+run_test test_clustering_containers "clustering"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index bab1576d2..8b39b5dd3 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -1,4 +1,4 @@
-test_clustering() {
+test_clustering_membership() {
   setup_clustering_bridge
   prefix="lxd$$"
   bridge="${prefix}"
@@ -134,6 +134,7 @@ test_clustering_containers() {
 
   # The container is visible through both nodes
   LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q STOPPED
+  LXD_DIR="${LXD_ONE_DIR}" lxc list | grep foo | grep -q node2
   LXD_DIR="${LXD_TWO_DIR}" lxc list | grep foo | grep -q STOPPED
 
   # A Node: field indicates on which node the container is running

From bbf536c267c7b486fa57d1ca546f9867025b179d Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 4 Dec 2017 09:27:00 +0000
Subject: [PATCH 101/116] Fixed clustering integration tests

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 test/main.sh              | 4 ++--
 test/suites/clustering.sh | 6 ++++++
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/test/main.sh b/test/main.sh
index 748a40502..364ff0339 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -193,8 +193,8 @@ run_test test_resources "resources"
 run_test test_kernel_limits "kernel limits"
 run_test test_macaroon_auth "macaroon authentication"
 run_test test_console "console"
-run_test test_clustering_membership "clustering"
-run_test test_clustering_containers "clustering"
+run_test test_clustering_membership "clustering membership"
+run_test test_clustering_containers "clustering containers"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 8b39b5dd3..5c90dc980 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -98,6 +98,9 @@ test_clustering_membership() {
   rm -f "${LXD_THREE_DIR}/unix.socket"
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
+
+  teardown_clustering_netns
+  teardown_clustering_bridge
 }
 
 test_clustering_containers() {
@@ -160,4 +163,7 @@ test_clustering_containers() {
   rm -f "${LXD_THREE_DIR}/unix.socket"
   rm -f "${LXD_TWO_DIR}/unix.socket"
   rm -f "${LXD_ONE_DIR}/unix.socket"
+
+  teardown_clustering_netns
+  teardown_clustering_bridge
 }

From 96ded5f5a574b8825a4bff2dcb1e08f5f7fe56f8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 11:59:36 +0000
Subject: [PATCH 102/116] Add storage_pools_nodes table and storage_pools.state
 column

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/cluster/schema.go      | 11 ++++++++++-
 lxd/db/cluster/update.go      | 18 ++++++++++++++++++
 lxd/db/cluster/update_test.go | 19 +++++++++++++++++++
 lxd/db/storage_pools.go       | 16 +++++++++++++++-
 test/includes/lxd.sh          |  1 +
 5 files changed, 63 insertions(+), 2 deletions(-)

diff --git a/lxd/db/cluster/schema.go b/lxd/db/cluster/schema.go
index 222760113..d83efc798 100644
--- a/lxd/db/cluster/schema.go
+++ b/lxd/db/cluster/schema.go
@@ -184,6 +184,7 @@ CREATE TABLE storage_pools (
     name TEXT NOT NULL,
     driver TEXT NOT NULL,
     description TEXT,
+    state INTEGER NOT NULL DEFAULT 0,
     UNIQUE (name)
 );
 CREATE TABLE storage_pools_config (
@@ -196,6 +197,14 @@ CREATE TABLE storage_pools_config (
     FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
+CREATE TABLE storage_pools_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (storage_pool_id, node_id),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
 CREATE TABLE storage_volumes (
     id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
     name TEXT NOT NULL,
@@ -216,5 +225,5 @@ CREATE TABLE storage_volumes_config (
     FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
 );
 
-INSERT INTO schema (version, updated_at) VALUES (3, strftime("%s"))
+INSERT INTO schema (version, updated_at) VALUES (4, strftime("%s"))
 `
diff --git a/lxd/db/cluster/update.go b/lxd/db/cluster/update.go
index 92c19aed0..bfd66506c 100644
--- a/lxd/db/cluster/update.go
+++ b/lxd/db/cluster/update.go
@@ -26,6 +26,24 @@ var updates = map[int]schema.Update{
 	1: updateFromV0,
 	2: updateFromV1,
 	3: updateFromV2,
+	4: updateFromV3,
+}
+
+func updateFromV3(tx *sql.Tx) error {
+	stmt := `
+CREATE TABLE storage_pools_nodes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+    storage_pool_id INTEGER NOT NULL,
+    node_id INTEGER NOT NULL,
+    UNIQUE (storage_pool_id, node_id),
+    FOREIGN KEY (storage_pool_id) REFERENCES storage_pools (id) ON DELETE CASCADE,
+    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE
+);
+ALTER TABLE storage_pools ADD COLUMN state INTEGER NOT NULL DEFAULT 0;
+UPDATE storage_pools SET state = 1;
+`
+	_, err := tx.Exec(stmt)
+	return err
 }
 
 func updateFromV2(tx *sql.Tx) error {
diff --git a/lxd/db/cluster/update_test.go b/lxd/db/cluster/update_test.go
index 646886505..d54ac340f 100644
--- a/lxd/db/cluster/update_test.go
+++ b/lxd/db/cluster/update_test.go
@@ -205,3 +205,22 @@ func TestUpdateFromV2(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, int64(0), n)
 }
+
+func TestUpdateFromV3(t *testing.T) {
+	schema := cluster.Schema()
+	db, err := schema.ExerciseUpdate(4, nil)
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO nodes VALUES (1, 'c1', '', '1.1.1.1', 666, 999, ?)", time.Now())
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO storage_pools VALUES (1, 'p1', 'zfs', '', 0)")
+	require.NoError(t, err)
+
+	_, err = db.Exec("INSERT INTO storage_pools_nodes VALUES (1, 1, 1)")
+	require.NoError(t, err)
+
+	// Unique constraint on storage_pool_id/node_id
+	_, err = db.Exec("INSERT INTO storage_pools_nodes VALUES (1, 1, 1)")
+	require.Error(t, err)
+}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index a32263e24..498c8685e 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -62,6 +62,12 @@ func (c *ClusterTx) StoragePoolConfigAdd(poolID, nodeID int64, config map[string
 	return storagePoolConfigAdd(c.tx, poolID, nodeID, config)
 }
 
+// Storage pools state.
+const (
+	storagePoolPending int = iota // Storage pool defined but not yet created.
+	storagePoolCreated            // Storage pool created on all nodes.
+)
+
 // Get all storage pools.
 func (c *Cluster) StoragePools() ([]string, error) {
 	var name string
@@ -191,7 +197,7 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 		return -1, err
 	}
 
-	result, err := tx.Exec("INSERT INTO storage_pools (name, description, driver) VALUES (?, ?, ?)", poolName, poolDescription, poolDriver)
+	result, err := tx.Exec("INSERT INTO storage_pools (name, description, driver, state) VALUES (?, ?, ?, ?)", poolName, poolDescription, poolDriver, storagePoolCreated)
 	if err != nil {
 		tx.Rollback()
 		return -1, err
@@ -203,6 +209,14 @@ func (c *Cluster) StoragePoolCreate(poolName string, poolDescription string, poo
 		return -1, err
 	}
 
+	// Insert a node-specific entry pointing to ourselves.
+	columns := []string{"storage_pool_id", "node_id"}
+	values := []interface{}{id, c.nodeID}
+	_, err = query.UpsertObject(tx, "storage_pools_nodes", columns, values)
+	if err != nil {
+		return -1, err
+	}
+
 	err = storagePoolConfigAdd(tx, id, c.nodeID, poolConfig)
 	if err != nil {
 		tx.Rollback()
diff --git a/test/includes/lxd.sh b/test/includes/lxd.sh
index ec82823ee..efebb728b 100644
--- a/test/includes/lxd.sh
+++ b/test/includes/lxd.sh
@@ -209,6 +209,7 @@ kill_lxd() {
         check_empty_table "${daemon_dir}/raft/db.bin" "profiles_devices"
         check_empty_table "${daemon_dir}/raft/db.bin" "profiles_devices_config"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools"
+        check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools_nodes"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_pools_config"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes"
         check_empty_table "${daemon_dir}/raft/db.bin" "storage_volumes_config"

From 91f2929c4ca22994a67c88802830535326276d73 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 12:30:08 +0000
Subject: [PATCH 103/116] Add db.NodesCount to return the number of existing
 nodes

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go      | 12 ++++++++++++
 lxd/cluster/membership_test.go | 13 +++++++++++++
 lxd/db/node.go                 | 12 ++++++++++++
 lxd/db/node_test.go            | 16 ++++++++++++++++
 4 files changed, 53 insertions(+)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 68357c8ba..1b0166f7a 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -478,6 +478,18 @@ func List(state *state.State) ([]db.NodeInfo, map[int64]bool, error) {
 	return nodes, flags, nil
 }
 
+// Count is a convenience for checking the current number of nodes in the
+// cluster.
+func Count(state *state.State) (int, error) {
+	var count int
+	err := state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		count, err = tx.NodesCount()
+		return err
+	})
+	return count, err
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index b454e7824..f38f43c65 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -127,6 +127,10 @@ func TestBootstrap(t *testing.T) {
 	conn, err := driver.Open("test.db")
 	require.NoError(t, err)
 	require.NoError(t, conn.Close())
+
+	count, err := cluster.Count(state)
+	require.NoError(t, err)
+	assert.Equal(t, 1, count)
 }
 
 // If pre-conditions are not met, a descriptive error is returned.
@@ -306,6 +310,11 @@ func TestJoin(t *testing.T) {
 	assert.True(t, flags[1])
 	assert.True(t, flags[2])
 
+	// The Count function returns the number of nodes.
+	count, err := cluster.Count(state)
+	require.NoError(t, err)
+	assert.Equal(t, 2, count)
+
 	// Leave the cluster.
 	leaving, err := cluster.Leave(state, gateway, "rusp", false /* force */)
 	require.NoError(t, err)
@@ -325,6 +334,10 @@ func TestJoin(t *testing.T) {
 	future := raft.GetConfiguration()
 	require.NoError(t, future.Error())
 	assert.Len(t, future.Configuration().Servers, 1)
+
+	count, err = cluster.Count(state)
+	require.NoError(t, err)
+	assert.Equal(t, 1, count)
 }
 
 // Helper for setting fixtures for Bootstrap tests.
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 2982bd884..172d79448 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -69,6 +69,18 @@ func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
 	return c.nodes("")
 }
 
+// NodesCount returns the number of nodes in the LXD cluster.
+//
+// Since there's always at least one node row, even when not clustered, the
+// return value is greater than zero.
+func (c *ClusterTx) NodesCount() (int, error) {
+	count, err := query.Count(c.tx, "nodes", "")
+	if err != nil {
+		return 0, errors.Wrap(err, "failed to count existing nodes")
+	}
+	return count, nil
+}
+
 // NodeRename changes the name of an existing node.
 //
 // Return an error if a node with the same name already exists.
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index 84a6bceff..d95363746 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -37,6 +37,22 @@ func TestNodeAdd(t *testing.T) {
 	assert.Equal(t, "buzz", node.Name)
 }
 
+func TestNodesCount(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	count, err := tx.NodesCount()
+	require.NoError(t, err)
+	assert.Equal(t, 1, count) // There's always at least one node.
+
+	_, err = tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	count, err = tx.NodesCount()
+	require.NoError(t, err)
+	assert.Equal(t, 2, count)
+}
+
 // Rename a node
 func TestNodeRename(t *testing.T) {
 	tx, cleanup := db.NewTestClusterTx(t)

From 67a99c99b45f68c58b53d9236ca5cb1cefa7e675 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 15:13:50 +0000
Subject: [PATCH 104/116] Change db.SelectConfig to support params substitution

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/config.go            | 4 ++--
 lxd/db/networks.go          | 2 +-
 lxd/db/query/config.go      | 8 ++++----
 lxd/db/query/config_test.go | 8 ++++----
 lxd/db/storage_pools.go     | 3 +--
 5 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/lxd/db/config.go b/lxd/db/config.go
index d76d8188a..5a44dd1db 100644
--- a/lxd/db/config.go
+++ b/lxd/db/config.go
@@ -4,7 +4,7 @@ import "github.com/lxc/lxd/lxd/db/query"
 
 // Config fetches all LXD node-level config keys.
 func (n *NodeTx) Config() (map[string]string, error) {
-	return query.SelectConfig(n.tx, "config")
+	return query.SelectConfig(n.tx, "config", "")
 }
 
 // UpdateConfig updates the given LXD node-level configuration keys in the
@@ -15,7 +15,7 @@ func (n *NodeTx) UpdateConfig(values map[string]string) error {
 
 // Config fetches all LXD cluster config keys.
 func (c *ClusterTx) Config() (map[string]string, error) {
-	return query.SelectConfig(c.tx, "config")
+	return query.SelectConfig(c.tx, "config", "")
 }
 
 // UpdateConfig updates the given LXD cluster configuration keys in the
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 3e9107899..19d60f35a 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -21,7 +21,7 @@ func (c *ClusterTx) NetworkConfigs() (map[string]map[string]string, error) {
 	networks := make(map[string]map[string]string, len(names))
 	for _, name := range names {
 		table := "networks_config JOIN networks ON networks.id=networks_config.network_id"
-		config, err := query.SelectConfig(c.tx, table, fmt.Sprintf("networks.name='%s'", name))
+		config, err := query.SelectConfig(c.tx, table, "networks.name=?", name)
 		if err != nil {
 			return nil, err
 		}
diff --git a/lxd/db/query/config.go b/lxd/db/query/config.go
index 878b6d8f0..94acf49ea 100644
--- a/lxd/db/query/config.go
+++ b/lxd/db/query/config.go
@@ -11,13 +11,13 @@ import (
 // additional WHERE filters can be specified.
 //
 // Returns a map of key names to their associated values.
-func SelectConfig(tx *sql.Tx, table string, filters ...string) (map[string]string, error) {
+func SelectConfig(tx *sql.Tx, table string, where string, args ...interface{}) (map[string]string, error) {
 	query := fmt.Sprintf("SELECT key, value FROM %s", table)
-	if len(filters) > 0 {
-		query += " WHERE " + strings.Join(filters, " ")
+	if where != "" {
+		query += fmt.Sprintf(" WHERE %s", where)
 	}
 
-	rows, err := tx.Query(query)
+	rows, err := tx.Query(query, args...)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lxd/db/query/config_test.go b/lxd/db/query/config_test.go
index fe09735e3..d77026d17 100644
--- a/lxd/db/query/config_test.go
+++ b/lxd/db/query/config_test.go
@@ -11,14 +11,14 @@ import (
 
 func TestSelectConfig(t *testing.T) {
 	tx := newTxForConfig(t)
-	values, err := query.SelectConfig(tx, "test")
+	values, err := query.SelectConfig(tx, "test", "")
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"foo": "x", "bar": "zz"}, values)
 }
 
 func TestSelectConfig_WithFilters(t *testing.T) {
 	tx := newTxForConfig(t)
-	values, err := query.SelectConfig(tx, "test", "key='bar'")
+	values, err := query.SelectConfig(tx, "test", "key=?", "bar")
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"bar": "zz"}, values)
 }
@@ -31,7 +31,7 @@ func TestUpdateConfig_NewKeys(t *testing.T) {
 	err := query.UpdateConfig(tx, "test", values)
 	require.NoError(t, err)
 
-	values, err = query.SelectConfig(tx, "test")
+	values, err = query.SelectConfig(tx, "test", "")
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"foo": "y", "bar": "zz"}, values)
 }
@@ -44,7 +44,7 @@ func TestDeleteConfig_Delete(t *testing.T) {
 	err := query.UpdateConfig(tx, "test", values)
 
 	require.NoError(t, err)
-	values, err = query.SelectConfig(tx, "test")
+	values, err = query.SelectConfig(tx, "test", "")
 	require.NoError(t, err)
 	assert.Equal(t, map[string]string{"bar": "zz"}, values)
 }
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 498c8685e..7d154c666 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -22,8 +22,7 @@ func (c *ClusterTx) StoragePoolConfigs() (map[string]map[string]string, error) {
 		table := `
 storage_pools_config JOIN storage_pools ON storage_pools.id=storage_pools_config.storage_pool_id
 `
-		filter := fmt.Sprintf("storage_pools.name='%s'", name)
-		config, err := query.SelectConfig(c.tx, table, filter)
+		config, err := query.SelectConfig(c.tx, table, "storage_pools.name=?", name)
 		if err != nil {
 			return nil, err
 		}

From 4df468b66807dc942926c81d010cbdb64888bb69 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 15:31:35 +0000
Subject: [PATCH 105/116] Update storage_pools_nodes when a node joins the
 cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/cluster/membership.go |  6 +++++-
 lxd/db/storage_pools.go   | 20 +++++++++++++++++++-
 2 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 1b0166f7a..7f09f68d4 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -318,10 +318,14 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 			if !ok {
 				return fmt.Errorf("joining node has no config for pool %s", name)
 			}
+			err := tx.StoragePoolNodeJoin(id, node.ID)
+			if err != nil {
+				return errors.Wrap(err, "failed to add joining node's to the pool")
+			}
 			// We only need to add the source key, since the other keys are global and
 			// are already there.
 			config = map[string]string{"source": config["source"]}
-			err := tx.StoragePoolConfigAdd(id, node.ID, config)
+			err = tx.StoragePoolConfigAdd(id, node.ID, config)
 			if err != nil {
 				return errors.Wrap(err, "failed to add joining node's pool config")
 			}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 7d154c666..d8e3dd708 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -12,6 +12,10 @@ import (
 
 // StoragePoolConfigs returns a map associating each storage pool name to its
 // config values.
+//
+// The config values are the ones defined for the node this function is run
+// on. They are used by cluster.Join when a new node joins the cluster and its
+// configuration needs to be migrated to the cluster database.
 func (c *ClusterTx) StoragePoolConfigs() (map[string]map[string]string, error) {
 	names, err := query.SelectStrings(c.tx, "SELECT name FROM storage_pools")
 	if err != nil {
@@ -22,7 +26,9 @@ func (c *ClusterTx) StoragePoolConfigs() (map[string]map[string]string, error) {
 		table := `
 storage_pools_config JOIN storage_pools ON storage_pools.id=storage_pools_config.storage_pool_id
 `
-		config, err := query.SelectConfig(c.tx, table, "storage_pools.name=?", name)
+		config, err := query.SelectConfig(
+			c.tx, table, "storage_pools.name=? AND storage_pools_config.storage_pool_id=?",
+			name, c.nodeID)
 		if err != nil {
 			return nil, err
 		}
@@ -56,6 +62,18 @@ func (c *ClusterTx) StoragePoolIDs() (map[string]int64, error) {
 	return ids, nil
 }
 
+// StoragePoolNodeJoin adds a new entry in the storage_pools_nodes table.
+//
+// It should only be used when a new node joins the cluster, when it's safe to
+// assume that the relevant pool has already been created on the joining node,
+// and we just need to track it.
+func (c *ClusterTx) StoragePoolNodeJoin(poolID, nodeID int64) error {
+	columns := []string{"storage_pool_id", "node_id"}
+	values := []interface{}{poolID, nodeID}
+	_, err := query.UpsertObject(c.tx, "storage_pools_nodes", columns, values)
+	return err
+}
+
 // StoragePoolConfigAdd adds a new entry in the storage_pools_config table
 func (c *ClusterTx) StoragePoolConfigAdd(poolID, nodeID int64, config map[string]string) error {
 	return storagePoolConfigAdd(c.tx, poolID, nodeID, config)

From 8d52b53fa13de7fe58d6451e891964a35abeb07a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 15:33:01 +0000
Subject: [PATCH 106/116] Add db helper functions to create pending storage
 pools

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/storage_pools.go      | 173 +++++++++++++++++++++++++++++++++++++++++++
 lxd/db/storage_pools_test.go |  90 ++++++++++++++++++++++
 2 files changed, 263 insertions(+)
 create mode 100644 lxd/db/storage_pools_test.go

diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index d8e3dd708..8c85aecc1 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -3,10 +3,12 @@ package db
 import (
 	"database/sql"
 	"fmt"
+	"strings"
 
 	_ "github.com/mattn/go-sqlite3"
 
 	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/shared"
 	"github.com/lxc/lxd/shared/api"
 )
 
@@ -37,6 +39,23 @@ storage_pools_config JOIN storage_pools ON storage_pools.id=storage_pools_config
 	return pools, nil
 }
 
+// StoragePoolID returns the ID of the pool with the given name.
+func (c *ClusterTx) StoragePoolID(name string) (int64, error) {
+	stmt := "SELECT id FROM storage_pools WHERE name=?"
+	ids, err := query.SelectIntegers(c.tx, stmt, name)
+	if err != nil {
+		return -1, err
+	}
+	switch len(ids) {
+	case 0:
+		return -1, NoSuchObjectError
+	case 1:
+		return int64(ids[0]), nil
+	default:
+		return -1, fmt.Errorf("more than one pool has the given name")
+	}
+}
+
 // StoragePoolIDs returns a map associating each storage pool name to its ID.
 func (c *ClusterTx) StoragePoolIDs() (map[string]int64, error) {
 	pools := []struct {
@@ -83,8 +102,162 @@ func (c *ClusterTx) StoragePoolConfigAdd(poolID, nodeID int64, config map[string
 const (
 	storagePoolPending int = iota // Storage pool defined but not yet created.
 	storagePoolCreated            // Storage pool created on all nodes.
+	storagePoolErrored            // Storage pool creation failed on some nodes.
 )
 
+// StoragePoolCreatePending creates a new pending storage pool on the node with
+// the given name.
+func (c *ClusterTx) StoragePoolCreatePending(node, name, driver string, conf map[string]string) error {
+	// First check if a storage pool with the given name exists, and, if
+	// so, that it has a matching driver and it's in the pending state.
+	pool := struct {
+		id     int64
+		driver string
+		state  int
+	}{}
+
+	var errConsistency error
+	dest := func(i int) []interface{} {
+		// Sanity check that there is at most one pool with the given name.
+		if i != 0 {
+			errConsistency = fmt.Errorf("more than one pool exists with the given name")
+		}
+		return []interface{}{&pool.id, &pool.driver, &pool.state}
+	}
+	stmt := "SELECT id, driver, state FROM storage_pools WHERE name=?"
+	err := query.SelectObjects(c.tx, dest, stmt, name)
+	if err != nil {
+		return err
+	}
+	if errConsistency != nil {
+		return errConsistency
+	}
+
+	var poolID = pool.id
+	if poolID == 0 {
+		// No existing pool with the given name was found, let's create
+		// one.
+		columns := []string{"name", "driver"}
+		values := []interface{}{name, driver}
+		poolID, err = query.UpsertObject(c.tx, "storage_pools", columns, values)
+		if err != nil {
+			return err
+		}
+	} else {
+		// Check that the existing pool matches the given driver and
+		// is in the pending state.
+		if pool.driver != driver {
+			return fmt.Errorf("pool already exists with a different driver")
+		}
+		if pool.state != storagePoolPending {
+			return fmt.Errorf("pool is not in pending state")
+		}
+	}
+
+	// Get the ID of the node with the given name.
+	nodeInfo, err := c.NodeByName(node)
+	if err != nil {
+		return err
+	}
+
+	// Check that no storage_pool entry of this node and pool exists yet.
+	count, err := query.Count(
+		c.tx, "storage_pools_nodes", "storage_pool_id=? AND node_id=?", poolID, nodeInfo.ID)
+	if err != nil {
+		return err
+	}
+	if count != 0 {
+		return DbErrAlreadyDefined
+	}
+
+	// Insert the node-specific configuration.
+	columns := []string{"storage_pool_id", "node_id"}
+	values := []interface{}{poolID, nodeInfo.ID}
+	_, err = query.UpsertObject(c.tx, "storage_pools_nodes", columns, values)
+	if err != nil {
+		return err
+	}
+	err = c.StoragePoolConfigAdd(poolID, nodeInfo.ID, conf)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// StoragePoolCreated sets the state of the given pool to "CREATED".
+func (c *ClusterTx) StoragePoolCreated(name string) error {
+	return c.storagePoolState(name, storagePoolCreated)
+}
+
+// StoragePoolErrored sets the state of the given pool to "ERRORED".
+func (c *ClusterTx) StoragePoolErrored(name string) error {
+	return c.storagePoolState(name, storagePoolErrored)
+}
+
+func (c *ClusterTx) storagePoolState(name string, state int) error {
+	stmt := "UPDATE storage_pools SET state=? WHERE name=?"
+	result, err := c.tx.Exec(stmt, state, name)
+	if err != nil {
+		return err
+	}
+	n, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if n != 1 {
+		return NoSuchObjectError
+	}
+	return nil
+}
+
+// StoragePoolNodeConfigs returns the node-specific configuration of all
+// nodes grouped by node name, for the given poolID.
+//
+// If the storage pool is not defined on all nodes, an error is returned.
+func (c *ClusterTx) StoragePoolNodeConfigs(poolID int64) (map[string]map[string]string, error) {
+	// Fetch all nodes.
+	nodes, err := c.Nodes()
+	if err != nil {
+		return nil, err
+	}
+
+	// Fetch the names of the nodes where the storage pool is defined.
+	stmt := `
+SELECT nodes.name FROM nodes
+  LEFT JOIN storage_pools_nodes ON storage_pools_nodes.node_id = nodes.id
+  LEFT JOIN storage_pools ON storage_pools_nodes.storage_pool_id = storage_pools.id
+WHERE storage_pools.id = ? AND storage_pools.state = ?
+`
+	defined, err := query.SelectStrings(c.tx, stmt, poolID, storagePoolPending)
+	if err != nil {
+		return nil, err
+	}
+
+	// Figure out which nodes are missing.
+	missing := []string{}
+	for _, node := range nodes {
+		if !shared.StringInSlice(node.Name, defined) {
+			missing = append(missing, node.Name)
+		}
+	}
+
+	if len(missing) > 0 {
+		return nil, fmt.Errorf("Pool not defined on nodes: %s", strings.Join(missing, ", "))
+	}
+
+	configs := map[string]map[string]string{}
+	for _, node := range nodes {
+		config, err := query.SelectConfig(c.tx, "storage_pools_config", "node_id=?", node.ID)
+		if err != nil {
+			return nil, err
+		}
+		configs[node.Name] = config
+	}
+
+	return configs, nil
+}
+
 // Get all storage pools.
 func (c *Cluster) StoragePools() ([]string, error) {
 	var name string
diff --git a/lxd/db/storage_pools_test.go b/lxd/db/storage_pools_test.go
new file mode 100644
index 000000000..b4a06081d
--- /dev/null
+++ b/lxd/db/storage_pools_test.go
@@ -0,0 +1,90 @@
+package db_test
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestStoragePoolsCreatePending(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+	_, err = tx.NodeAdd("rusp", "5.6.7.8:666")
+	require.NoError(t, err)
+
+	config := map[string]string{"source": "/foo"}
+	err = tx.StoragePoolCreatePending("buzz", "pool1", "dir", config)
+	require.NoError(t, err)
+
+	poolID, err := tx.StoragePoolID("pool1")
+	require.NoError(t, err)
+	assert.True(t, poolID > 0)
+
+	config = map[string]string{"source": "/bar"}
+	err = tx.StoragePoolCreatePending("rusp", "pool1", "dir", config)
+	require.NoError(t, err)
+
+	// The initial node (whose name is 'none' by default) is missing.
+	_, err = tx.StoragePoolNodeConfigs(poolID)
+	require.EqualError(t, err, "Pool not defined on nodes: none")
+
+	config = map[string]string{"source": "/egg"}
+	err = tx.StoragePoolCreatePending("none", "pool1", "dir", config)
+	require.NoError(t, err)
+
+	// Now the storage is defined on all nodes.
+	configs, err := tx.StoragePoolNodeConfigs(poolID)
+	require.NoError(t, err)
+	assert.Len(t, configs, 3)
+	assert.Equal(t, map[string]string{"source": "/foo"}, configs["buzz"])
+	assert.Equal(t, map[string]string{"source": "/bar"}, configs["rusp"])
+	assert.Equal(t, map[string]string{"source": "/egg"}, configs["none"])
+}
+
+// If an entry for the given pool and node already exists, an error is
+// returned.
+func TestStoragePoolsCreatePending_AlreadyDefined(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+
+	err = tx.StoragePoolCreatePending("buzz", "pool1", "dir", map[string]string{})
+	require.NoError(t, err)
+
+	err = tx.StoragePoolCreatePending("buzz", "pool1", "dir", map[string]string{})
+	require.Equal(t, db.DbErrAlreadyDefined, err)
+}
+
+// If no node with the given name is found, an error is returned.
+func TestStoragePoolsCreatePending_NonExistingNode(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	err := tx.StoragePoolCreatePending("buzz", "pool1", "dir", map[string]string{})
+	require.Equal(t, db.NoSuchObjectError, err)
+}
+
+// If a pool with the given name already exists but has different driver, an
+// error is returned.
+func TestStoragePoolsCreatePending_DriverMismatch(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	_, err := tx.NodeAdd("buzz", "1.2.3.4:666")
+	require.NoError(t, err)
+	_, err = tx.NodeAdd("rusp", "5.6.7.8:666")
+	require.NoError(t, err)
+
+	err = tx.StoragePoolCreatePending("buzz", "pool1", "dir", map[string]string{})
+	require.NoError(t, err)
+
+	err = tx.StoragePoolCreatePending("rusp", "pool1", "zfs", map[string]string{})
+	require.EqualError(t, err, "pool already exists with a different driver")
+}

From d8c487e44508df1dae2b30164d2dc55d499197f2 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 15:59:48 +0000
Subject: [PATCH 107/116] Add targetNode query parameter to /1.0/storage-pools
 POST

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/lxd_storage_pools.go   |  6 +++++-
 doc/api-extensions.md         |  1 +
 lxd/.dir-locals.el            |  2 +-
 lxd/api_storage_pools_test.go | 40 ++++++++++++++++++++++++++++++++++++++
 lxd/db/storage_pools.go       | 14 ++++++++++++--
 lxd/storage_pools.go          | 45 ++++++++++++++++++++++++++++++++++++++++---
 shared/api/storage_pool.go    |  3 +++
 7 files changed, 104 insertions(+), 7 deletions(-)
 create mode 100644 lxd/api_storage_pools_test.go

diff --git a/client/lxd_storage_pools.go b/client/lxd_storage_pools.go
index 8e10288f1..717c0f22d 100644
--- a/client/lxd_storage_pools.go
+++ b/client/lxd_storage_pools.go
@@ -71,7 +71,11 @@ func (r *ProtocolLXD) CreateStoragePool(pool api.StoragePoolsPost) error {
 	}
 
 	// Send the request
-	_, _, err := r.query("POST", "/storage-pools", pool, "")
+	path := "/storage-pools"
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	_, _, err := r.query("POST", path, pool, "")
 	if err != nil {
 		return err
 	}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 35a89addc..4d113caae 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -388,3 +388,4 @@ This includes the following new endpoints:
 The following existing endpoints have been modified:
 
  * `POST /1.0/containers` accepts a new targetNode query parameter
+ * `POST /1.0/storage-pools` accepts a new targetNode query parameter
diff --git a/lxd/.dir-locals.el b/lxd/.dir-locals.el
index bf09f9074..9342fb083 100644
--- a/lxd/.dir-locals.el
+++ b/lxd/.dir-locals.el
@@ -1,7 +1,7 @@
 ;;; Directory Local Variables
 ;;; For more information see (info "(emacs) Directory Variables")
 ((go-mode
-  . ((go-test-args . "-tags libsqlite3 -timeout 25s")
+  . ((go-test-args . "-tags libsqlite3 -timeout 35s")
      (eval
       . (set
 	 (make-local-variable 'flycheck-go-build-tags)
diff --git a/lxd/api_storage_pools_test.go b/lxd/api_storage_pools_test.go
new file mode 100644
index 000000000..8c572f487
--- /dev/null
+++ b/lxd/api_storage_pools_test.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+	"testing"
+
+	"github.com/lxc/lxd/shared/api"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Create a new pending storage pool using the targetNode query parameter.
+func TestStoragePoolsCreate_TargetNode(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping storage-pools targetNode test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	daemon := daemons[0]
+	client := f.ClientUnix(daemon).ClusterTargetNode("rusp-0")
+
+	poolPost := api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	poolPost.Config = map[string]string{
+		"source": "",
+	}
+
+	err := client.CreateStoragePool(poolPost)
+	require.NoError(t, err)
+
+	pool, _, err := client.GetStoragePool("mypool")
+	require.NoError(t, err)
+
+	assert.Equal(t, "PENDING", pool.State)
+}
diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 8c85aecc1..943b08f90 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -328,10 +328,11 @@ func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, erro
 	var poolDriver string
 	poolID := int64(-1)
 	description := sql.NullString{}
+	var state int
 
-	query := "SELECT id, driver, description FROM storage_pools WHERE name=?"
+	query := "SELECT id, driver, description, state FROM storage_pools WHERE name=?"
 	inargs := []interface{}{poolName}
-	outargs := []interface{}{&poolID, &poolDriver, &description}
+	outargs := []interface{}{&poolID, &poolDriver, &description, &state}
 
 	err := dbQueryRowScan(c.db, query, inargs, outargs)
 	if err != nil {
@@ -353,6 +354,15 @@ func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, erro
 	storagePool.Description = description.String
 	storagePool.Config = config
 
+	switch state {
+	case storagePoolPending:
+		storagePool.State = "PENDING"
+	case storagePoolCreated:
+		storagePool.State = "CREATED"
+	default:
+		storagePool.State = "UNKNOWN"
+	}
+
 	return poolID, &storagePool, nil
 }
 
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index a0e384ab5..4f4bafdc9 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -8,6 +8,7 @@ import (
 	"strings"
 
 	"github.com/gorilla/mux"
+	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
 	"github.com/lxc/lxd/shared/api"
@@ -77,12 +78,50 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 		return BadRequest(fmt.Errorf("No driver provided"))
 	}
 
-	err = storagePoolCreateInternal(d.State(), req.Name, req.Description, req.Driver, req.Config)
+	url := fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, req.Name)
+	response := SyncResponseLocation(true, nil, url)
+
+	targetNode := r.FormValue("targetNode")
+	if targetNode == "" {
+		count, err := cluster.Count(d.State())
+		if err != nil {
+			return SmartError(err)
+		}
+
+		if count == 1 {
+			// No targetNode was specified and we're either a single-node
+			// cluster or not clustered at all, so create the storage
+			// pool immediately.
+			err = storagePoolCreateInternal(
+				d.State(), req.Name, req.Description, req.Driver, req.Config)
+			if err != nil {
+				return InternalError(err)
+			}
+			return response
+		}
+
+		// No targetNode was specified and we're clustered. Check that
+		// the storage pool has been defined on all nodes and, if so,
+		// actually create it on all of them.
+		panic("TODO")
+	}
+
+	// A targetNode was specified, let's just define the node's storage
+	// without actually creating it. The only legal key value for the
+	// storage config is 'source'.
+	for key := range req.Config {
+		if key != "source" {
+			return SmartError(fmt.Errorf("Invalid config key '%s'", key))
+		}
+	}
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		return tx.StoragePoolCreatePending(targetNode, req.Name, req.Driver, req.Config)
+	})
 	if err != nil {
-		return InternalError(err)
+		return SmartError(err)
 	}
 
-	return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, req.Name))
+	return response
 }
 
 var storagePoolsCmd = Command{name: "storage-pools", get: storagePoolsGet, post: storagePoolsPost}
diff --git a/shared/api/storage_pool.go b/shared/api/storage_pool.go
index 157d2d275..ac5a2b3cb 100644
--- a/shared/api/storage_pool.go
+++ b/shared/api/storage_pool.go
@@ -19,6 +19,9 @@ type StoragePool struct {
 	Name   string   `json:"name" yaml:"name"`
 	Driver string   `json:"driver" yaml:"driver"`
 	UsedBy []string `json:"used_by" yaml:"used_by"`
+
+	// API extension: clustering
+	State string `json:"state" yaml:"state"`
 }
 
 // StoragePoolPut represents the modifiable fields of a LXD storage pool.

From c2606f5d88ec5751aad762f8697cf1591b6a769c Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 16:33:01 +0000
Subject: [PATCH 108/116] Add STATE column to lxc storage list, when LXD is
 clustered

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/storage.go            | 24 +++++++++++++++++++-----
 po/de.po                  | 26 +++++++++++++-------------
 po/el.po                  | 26 +++++++++++++-------------
 po/fr.po                  | 26 +++++++++++++-------------
 po/id.po                  | 26 +++++++++++++-------------
 po/it.po                  | 26 +++++++++++++-------------
 po/ja.po                  | 26 +++++++++++++-------------
 po/lxd.pot                | 26 +++++++++++++-------------
 po/nb_NO.po               | 26 +++++++++++++-------------
 po/nl.po                  | 26 +++++++++++++-------------
 po/pt_BR.po               | 26 +++++++++++++-------------
 po/ru.po                  | 26 +++++++++++++-------------
 po/sr.po                  | 26 +++++++++++++-------------
 po/sv.po                  | 26 +++++++++++++-------------
 po/tr.po                  | 26 +++++++++++++-------------
 po/zh.po                  | 26 +++++++++++++-------------
 po/zh_Hans.po             | 26 +++++++++++++-------------
 test/main.sh              |  1 +
 test/suites/clustering.sh | 37 +++++++++++++++++++++++++++++++++++++
 19 files changed, 265 insertions(+), 213 deletions(-)

diff --git a/lxc/storage.go b/lxc/storage.go
index 75e2b1b35..66e20c216 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -669,20 +669,34 @@ func (c *storageCmd) doStoragePoolsList(conf *config.Config, args []string) erro
 	data := [][]string{}
 	for _, pool := range pools {
 		usedby := strconv.Itoa(len(pool.UsedBy))
-
-		data = append(data, []string{pool.Name, pool.Description, pool.Driver, pool.Config["source"], usedby})
+		details := []string{pool.Name, pool.Description, pool.Driver}
+		if client.IsClustered() {
+			details = append(details, pool.State)
+		} else {
+			details = append(details, pool.Config["source"])
+		}
+		details = append(details, usedby)
+		data = append(data, details)
 	}
 
 	table := tablewriter.NewWriter(os.Stdout)
 	table.SetAutoWrapText(false)
 	table.SetAlignment(tablewriter.ALIGN_LEFT)
 	table.SetRowLine(true)
-	table.SetHeader([]string{
+
+	header := []string{
 		i18n.G("NAME"),
 		i18n.G("DESCRIPTION"),
 		i18n.G("DRIVER"),
-		i18n.G("SOURCE"),
-		i18n.G("USED BY")})
+	}
+	if client.IsClustered() {
+		header = append(header, i18n.G("STATE"))
+	} else {
+		header = append(header, i18n.G("SOURCE"))
+	}
+	header = append(header, i18n.G("USED BY"))
+	table.SetHeader(header)
+
 	sort.Sort(byName(data))
 	table.AppendBulk(data)
 	table.Render()
diff --git a/po/de.po b/po/de.po
index 1cf22ea85..a9384d8b2 100644
--- a/po/de.po
+++ b/po/de.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: 2017-02-14 17:11+0000\n"
 "Last-Translator: Tim Rose <tim at netlope.de>\n"
 "Language-Team: German <https://hosted.weblate.org/projects/linux-containers/"
@@ -430,7 +430,7 @@ msgid "Config key/value to apply to the new container"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, fuzzy, c-format
 msgid "Config parsing error: %s"
 msgstr "YAML Analyse Fehler %v\n"
@@ -503,11 +503,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -858,7 +858,7 @@ msgid "Must supply container name for: "
 msgstr "der Name des Ursprung Containers muss angegeben werden"
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -1022,7 +1022,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -1134,7 +1134,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1170,11 +1170,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1310,12 +1310,12 @@ msgstr "Profil %s gelöscht\n"
 msgid "Storage pool name"
 msgstr "Profilname kann nicht geändert werden"
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, fuzzy, c-format
 msgid "Storage volume %s created"
 msgstr "Profil %s erstellt\n"
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, fuzzy, c-format
 msgid "Storage volume %s deleted"
 msgstr "Profil %s gelöscht\n"
@@ -1333,7 +1333,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1454,7 +1454,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/el.po b/po/el.po
index 802e492f1..9bb891661 100644
--- a/po/el.po
+++ b/po/el.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: 2017-02-14 08:00+0000\n"
 "Last-Translator: Simos Xenitellis <simos.65 at gmail.com>\n"
 "Language-Team: Greek <https://hosted.weblate.org/projects/linux-containers/"
@@ -318,7 +318,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -389,11 +389,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -731,7 +731,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -889,7 +889,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -998,7 +998,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1032,11 +1032,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1167,12 +1167,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1189,7 +1189,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1304,7 +1304,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/fr.po b/po/fr.po
index 53920289c..e0b55ca05 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: 2017-10-26 15:46+0000\n"
 "Last-Translator: Alban Vidal <alban.vidal at zordhak.fr>\n"
 "Language-Team: French <https://hosted.weblate.org/projects/linux-containers/"
@@ -418,7 +418,7 @@ msgid "Config key/value to apply to the new container"
 msgstr "Clé/valeur de configuration à appliquer au nouveau conteneur"
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr "Erreur lors de la lecture de la configuration : %s"
@@ -490,11 +490,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr "DESCRIPTION"
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr "PILOTE"
 
@@ -844,7 +844,7 @@ msgid "Must supply container name for: "
 msgstr "Vous devez fournir le nom d'un conteneur pour : "
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr "NOM"
 
@@ -1005,7 +1005,7 @@ msgstr "Permission refusée, êtes-vous dans le groupe lxd ?"
 msgid "Pid: %d"
 msgstr "Pid : %d"
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr "Appuyer sur Entrée pour ouvrir à nouveau l'éditeur"
 
@@ -1115,7 +1115,7 @@ msgstr "Serveur distant : %s"
 msgid "Remove %s (yes/no): "
 msgstr "Supprimer %s (oui/non) : "
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1151,11 +1151,11 @@ msgstr "TAILLE"
 msgid "SNAPSHOTS"
 msgstr "INSTANTANÉS"
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr "SOURCE"
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr "ÉTAT"
 
@@ -1291,12 +1291,12 @@ msgstr "Le réseau %s a été supprimé"
 msgid "Storage pool name"
 msgstr "Nom de l'ensemble de stockage"
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, fuzzy, c-format
 msgid "Storage volume %s created"
 msgstr "Profil %s créé"
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, fuzzy, c-format
 msgid "Storage volume %s deleted"
 msgstr "Profil %s supprimé"
@@ -1313,7 +1313,7 @@ msgstr "Swap (courant)"
 msgid "Swap (peak)"
 msgstr "Swap (pointe)"
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr "TYPE"
 
@@ -1440,7 +1440,7 @@ msgstr "DATE DE PUBLICATION"
 msgid "URL"
 msgstr "URL"
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr "UTILISÉ PAR"
 
diff --git a/po/id.po b/po/id.po
index 775f49de0..e7b57bd27 100644
--- a/po/id.po
+++ b/po/id.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1160,12 +1160,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1297,7 +1297,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/it.po b/po/it.po
index 86c034965..3a3bd30be 100644
--- a/po/it.po
+++ b/po/it.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: 2017-08-18 14:22+0000\n"
 "Last-Translator: Alberto Donato <alberto.donato at gmail.com>\n"
 "Language-Team: Italian <https://hosted.weblate.org/projects/linux-containers/"
@@ -339,7 +339,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -410,11 +410,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr "DESCRIZIONE"
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr "DRIVER"
 
@@ -751,7 +751,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -908,7 +908,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -1017,7 +1017,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1051,11 +1051,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1186,12 +1186,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1208,7 +1208,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1323,7 +1323,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/ja.po b/po/ja.po
index dc5b4740f..f21046b15 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: 2017-09-28 20:29+0000\n"
 "Last-Translator: KATOH Yasufumi <karma at jazz.email.ne.jp>\n"
 "Language-Team: Japanese <https://hosted.weblate.org/projects/linux-"
@@ -320,7 +320,7 @@ msgid "Config key/value to apply to the new container"
 msgstr "新しいコンテナに適用するキー/値の設定"
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr "設定の構文エラー: %s"
@@ -391,11 +391,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -736,7 +736,7 @@ msgid "Must supply container name for: "
 msgstr "コンテナ名を指定する必要があります: "
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -894,7 +894,7 @@ msgstr "アクセスが拒否されました。lxd グループに所属して
 msgid "Pid: %d"
 msgstr "Pid: %d"
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr "再度エディタを開くためには Enter キーを押します"
 
@@ -1003,7 +1003,7 @@ msgstr "リモート名: %s"
 msgid "Remove %s (yes/no): "
 msgstr "%s を消去しますか (yes/no): "
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1038,11 +1038,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1174,12 +1174,12 @@ msgstr "ストレージプール %s を削除しました"
 msgid "Storage pool name"
 msgstr "ストレージプール名"
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr "ストレージボリューム %s を作成しました"
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr "ストレージボリューム %s を削除しました"
@@ -1196,7 +1196,7 @@ msgstr "Swap (現在値)"
 msgid "Swap (peak)"
 msgstr "Swap (ピーク)"
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1324,7 +1324,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/lxd.pot b/po/lxd.pot
index 35717c06f..639df2a73 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -7,7 +7,7 @@
 msgid   ""
 msgstr  "Project-Id-Version: lxd\n"
         "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-        "POT-Creation-Date: 2017-12-04 09:00+0000\n"
+        "POT-Creation-Date: 2017-12-06 11:57+0000\n"
         "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
         "Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
         "Language-Team: LANGUAGE <LL at li.org>\n"
@@ -306,7 +306,7 @@ msgstr  ""
 msgid   "Config key/value to apply to the new container"
 msgstr  ""
 
-#: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190 lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190 lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid   "Config parsing error: %s"
 msgstr  ""
@@ -376,11 +376,11 @@ msgstr  ""
 msgid   "DATABASE"
 msgstr  ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525 lxc/storage.go:682 lxc/storage.go:793
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525 lxc/storage.go:689 lxc/storage.go:807
 msgid   "DESCRIPTION"
 msgstr  ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid   "DRIVER"
 msgstr  ""
 
@@ -715,7 +715,7 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid   "NAME"
 msgstr  ""
 
@@ -872,7 +872,7 @@ msgstr  ""
 msgid   "Pid: %d"
 msgstr  ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid   "Press enter to open the editor again"
 msgstr  ""
 
@@ -980,7 +980,7 @@ msgstr  ""
 msgid   "Remove %s (yes/no): "
 msgstr  ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid   "Renamed storage volume from \"%s\" to \"%s\""
 msgstr  ""
@@ -1014,11 +1014,11 @@ msgstr  ""
 msgid   "SNAPSHOTS"
 msgstr  ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid   "SOURCE"
 msgstr  ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid   "STATE"
 msgstr  ""
 
@@ -1149,12 +1149,12 @@ msgstr  ""
 msgid   "Storage pool name"
 msgstr  ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid   "Storage volume %s created"
 msgstr  ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid   "Storage volume %s deleted"
 msgstr  ""
@@ -1171,7 +1171,7 @@ msgstr  ""
 msgid   "Swap (peak)"
 msgstr  ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid   "TYPE"
 msgstr  ""
 
@@ -1282,7 +1282,7 @@ msgstr  ""
 msgid   "URL"
 msgstr  ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid   "USED BY"
 msgstr  ""
 
diff --git a/po/nb_NO.po b/po/nb_NO.po
index ef5376b0e..a0d8b2a87 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1160,12 +1160,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1297,7 +1297,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/nl.po b/po/nl.po
index 06673fd9a..da4091f7d 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1160,12 +1160,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1297,7 +1297,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/pt_BR.po b/po/pt_BR.po
index 82f511ab5..7548e6757 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1160,12 +1160,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1297,7 +1297,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/ru.po b/po/ru.po
index 9210abcba..f40ef67b8 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: 2017-09-05 16:48+0000\n"
 "Last-Translator: Ilya Yakimavets <ilya.yakimavets at backend.expert>\n"
 "Language-Team: Russian <https://hosted.weblate.org/projects/linux-containers/"
@@ -402,7 +402,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -473,11 +473,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -815,7 +815,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -973,7 +973,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -1082,7 +1082,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1116,11 +1116,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1251,12 +1251,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1273,7 +1273,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1388,7 +1388,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/sr.po b/po/sr.po
index bb124ee16..cd56a45dc 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1160,12 +1160,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1297,7 +1297,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/sv.po b/po/sv.po
index 81b4289d2..a0e2c97c7 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1160,12 +1160,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1297,7 +1297,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/tr.po b/po/tr.po
index dda48eabc..7c47f0d0e 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1160,12 +1160,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1297,7 +1297,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/zh.po b/po/zh.po
index 19c6b27f8..df3af9211 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1160,12 +1160,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1297,7 +1297,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index 86f400bb0..544abd00f 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-04 09:00+0000\n"
+"POT-Creation-Date: 2017-12-06 11:48+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:979
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:682 lxc/storage.go:793
+#: lxc/storage.go:689 lxc/storage.go:807
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:683
+#: lxc/storage.go:690
 msgid "DRIVER"
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:681 lxc/storage.go:792
+#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
 msgid "NAME"
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:980
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1011
+#: lxc/storage.go:1025
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:684
+#: lxc/storage.go:695
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
 msgid "STATE"
 msgstr ""
 
@@ -1160,12 +1160,12 @@ msgstr ""
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:826
+#: lxc/storage.go:840
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:841
+#: lxc/storage.go:855
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1182,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:791
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
 msgid "TYPE"
 msgstr ""
 
@@ -1297,7 +1297,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:685 lxc/storage.go:794
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
 msgid "USED BY"
 msgstr ""
 
diff --git a/test/main.sh b/test/main.sh
index 364ff0339..c619ed4f2 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -195,6 +195,7 @@ run_test test_macaroon_auth "macaroon authentication"
 run_test test_console "console"
 run_test test_clustering_membership "clustering membership"
 run_test test_clustering_containers "clustering containers"
+run_test test_clustering_storage "clustering storage"
 
 # shellcheck disable=SC2034
 TEST_RESULT=success
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 5c90dc980..3f9b3d167 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -167,3 +167,40 @@ test_clustering_containers() {
   teardown_clustering_netns
   teardown_clustering_bridge
 }
+
+test_clustering_storage() {
+  setup_clustering_bridge
+  prefix="lxd$$"
+  bridge="${prefix}"
+
+  setup_clustering_netns 1
+  LXD_ONE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_ONE_DIR}"
+  ns1="${prefix}1"
+  spawn_lxd_and_bootstrap_cluster "${ns1}" "${bridge}" "${LXD_ONE_DIR}"
+
+  # The state of the preseeded storage pool shows up as CREATED
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep data | grep -q CREATED
+
+  # Add a newline at the end of each line. YAML has weird rules.
+  cert=$(sed ':a;N;$!ba;s/\n/\n\n/g' "${LXD_ONE_DIR}/server.crt")
+
+  # Spawn a second node
+  setup_clustering_netns 2
+  LXD_TWO_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_TWO_DIR}"
+  ns2="${prefix}2"
+  spawn_lxd_and_join_cluster "${ns2}" "${bridge}" "${cert}" 2 1 "${LXD_TWO_DIR}"
+
+  # The state of the preseeded storage pool is still CREATED
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep data | grep -q CREATED
+
+  LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
+  LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
+  sleep 2
+  rm -f "${LXD_TWO_DIR}/unix.socket"
+  rm -f "${LXD_ONE_DIR}/unix.socket"
+
+  teardown_clustering_netns
+  teardown_clustering_bridge
+}

From 11338a69baf2b822f3708e8cee82ae999ff29d4a Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 5 Dec 2017 17:20:15 +0000
Subject: [PATCH 109/116] Add --target command line option to lxc storage
 create

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxc/storage.go            | 16 +++++++++++--
 po/de.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/el.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/fr.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/id.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/it.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/ja.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/lxd.pot                | 57 +++++++++++++++++++++++++---------------------
 po/nb_NO.po               | 58 ++++++++++++++++++++++++++---------------------
 po/nl.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/pt_BR.po               | 58 ++++++++++++++++++++++++++---------------------
 po/ru.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/sr.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/sv.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/tr.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/zh.po                  | 58 ++++++++++++++++++++++++++---------------------
 po/zh_Hans.po             | 58 ++++++++++++++++++++++++++---------------------
 test/suites/clustering.sh |  8 +++++++
 18 files changed, 533 insertions(+), 418 deletions(-)

diff --git a/lxc/storage.go b/lxc/storage.go
index 66e20c216..29e153bda 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -23,6 +23,7 @@ import (
 
 type storageCmd struct {
 	resources bool
+	target    string
 }
 
 func (c *storageCmd) showByDefault() bool {
@@ -73,7 +74,7 @@ lxc storage list [<remote>:]
 lxc storage show [<remote>:]<pool> [--resources]
     Show details of a storage pool.
 
-lxc storage create [<remote>:]<pool> <driver> [key=value]...
+lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target <node>]
     Create a storage pool.
 
 lxc storage get [<remote>:]<pool> <key>
@@ -149,6 +150,7 @@ lxc storage volume show default container/data
 
 func (c *storageCmd) flags() {
 	gnuflag.BoolVar(&c.resources, "resources", false, i18n.G("Show the resources available to the storage pool"))
+	gnuflag.StringVar(&c.target, "target", "", i18n.G("Node name"))
 }
 
 func (c *storageCmd) run(conf *config.Config, args []string) error {
@@ -487,13 +489,23 @@ func (c *storageCmd) doStoragePoolCreate(client lxd.ContainerServer, name string
 		pool.Config[entry[0]] = entry[1]
 	}
 
+	// If a target node was specified the API won't actually create the
+	// pool, but only define it as pending in the database.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	// Create the pool
 	err := client.CreateStoragePool(pool)
 	if err != nil {
 		return err
 	}
 
-	fmt.Printf(i18n.G("Storage pool %s created")+"\n", name)
+	if c.target != "" {
+		fmt.Printf(i18n.G("Storage pool %s pending on node %s")+"\n", name, c.target)
+	} else {
+		fmt.Printf(i18n.G("Storage pool %s created")+"\n", name)
+	}
 
 	return nil
 }
diff --git a/po/de.po b/po/de.po
index a9384d8b2..43c389ee6 100644
--- a/po/de.po
+++ b/po/de.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: 2017-02-14 17:11+0000\n"
 "Last-Translator: Tim Rose <tim at netlope.de>\n"
 "Language-Team: German <https://hosted.weblate.org/projects/linux-containers/"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=2; plural=n != 1;\n"
 "X-Generator: Weblate 2.14-dev\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 #, fuzzy
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
@@ -54,7 +54,7 @@ msgstr ""
 "###\n"
 "### Der Name wird zwar angezeigt, lässt sich jedoch nicht ändern.\n"
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 #, fuzzy
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
@@ -403,7 +403,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -430,7 +430,7 @@ msgid "Config key/value to apply to the new container"
 msgstr "kann nicht zum selben Container Namen kopieren"
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, fuzzy, c-format
 msgid "Config parsing error: %s"
 msgstr "YAML Analyse Fehler %v\n"
@@ -503,11 +503,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -834,7 +834,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr "Fehlende Zusammenfassung."
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -858,7 +858,7 @@ msgid "Must supply container name for: "
 msgstr "der Name des Ursprung Containers muss angegeben werden"
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -913,7 +913,7 @@ msgstr "Kein Zertifikat zum hinzufügen bereitgestellt"
 msgid "No device found for this network"
 msgstr "Kein Zertifikat für diese Verbindung"
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 #, fuzzy
 msgid "No device found for this storage volume."
 msgstr "Kein Zertifikat für diese Verbindung"
@@ -932,7 +932,7 @@ msgstr "Gerät %s wurde von %s entfernt\n"
 msgid "Node %s renamed to %s"
 msgstr "Profil %s wurde auf %s angewandt\n"
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -941,7 +941,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr "automatisches Update: %s"
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -1022,7 +1022,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -1134,7 +1134,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1170,11 +1170,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1240,7 +1240,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1295,27 +1295,32 @@ msgstr "Anhalten des Containers fehlgeschlagen!"
 msgid "Stopping the container failed: %s"
 msgstr "Anhalten des Containers fehlgeschlagen!"
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, fuzzy, c-format
 msgid "Storage pool %s created"
 msgstr "Profil %s erstellt\n"
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, fuzzy, c-format
 msgid "Storage pool %s deleted"
 msgstr "Profil %s gelöscht\n"
 
+#: lxc/storage.go:505
+#, fuzzy, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr "Profil %s erstellt\n"
+
 #: lxc/init.go:146 lxc/init.go:147
 #, fuzzy
 msgid "Storage pool name"
 msgstr "Profilname kann nicht geändert werden"
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, fuzzy, c-format
 msgid "Storage volume %s created"
 msgstr "Profil %s erstellt\n"
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, fuzzy, c-format
 msgid "Storage volume %s deleted"
 msgstr "Profil %s gelöscht\n"
@@ -1333,7 +1338,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1371,7 +1376,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 #, fuzzy
 msgid "The specified device doesn't exist"
 msgstr "entfernte Instanz %s existiert nicht"
@@ -1454,7 +1459,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2393,7 +2398,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2406,7 +2411,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/el.po b/po/el.po
index 9bb891661..6a86b3e0c 100644
--- a/po/el.po
+++ b/po/el.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: 2017-02-14 08:00+0000\n"
 "Last-Translator: Simos Xenitellis <simos.65 at gmail.com>\n"
 "Language-Team: Greek <https://hosted.weblate.org/projects/linux-containers/"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=2; plural=n != 1;\n"
 "X-Generator: Weblate 2.12-dev\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -36,7 +36,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -292,7 +292,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -318,7 +318,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -389,11 +389,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -710,7 +710,7 @@ msgstr "  Χρήση μνήμης:"
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -731,7 +731,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -784,7 +784,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -802,7 +802,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -811,7 +811,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -889,7 +889,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -998,7 +998,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1032,11 +1032,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1100,7 +1100,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1153,26 +1153,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1189,7 +1194,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1225,7 +1230,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1304,7 +1309,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2083,7 +2088,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2096,7 +2101,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/fr.po b/po/fr.po
index e0b55ca05..63d5fbedf 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: 2017-10-26 15:46+0000\n"
 "Last-Translator: Alban Vidal <alban.vidal at zordhak.fr>\n"
 "Language-Team: French <https://hosted.weblate.org/projects/linux-containers/"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=2; plural=n > 1;\n"
 "X-Generator: Weblate 2.17\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -50,7 +50,7 @@ msgstr ""
 "###   source: /home/chb/mnt/lxd_test/default.img\n"
 "###   zfs.pool_name: default"
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 #, fuzzy
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
@@ -392,7 +392,7 @@ msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 "Impossible de désaffecter la clé '%s', elle n'est pas définie actuellement."
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr "Impossible de fournir le nom du conteneur à lister"
 
@@ -418,7 +418,7 @@ msgid "Config key/value to apply to the new container"
 msgstr "Clé/valeur de configuration à appliquer au nouveau conteneur"
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr "Erreur lors de la lecture de la configuration : %s"
@@ -490,11 +490,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr "DESCRIPTION"
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr "PILOTE"
 
@@ -821,7 +821,7 @@ msgstr "  Mémoire utilisée :"
 msgid "Missing summary."
 msgstr "Résumé manquant."
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr "Plus d'un périphérique correspond, spécifier le nom du périphérique."
 
@@ -844,7 +844,7 @@ msgid "Must supply container name for: "
 msgstr "Vous devez fournir le nom d'un conteneur pour : "
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr "NOM"
 
@@ -897,7 +897,7 @@ msgstr "Un certificat à ajouter n'a pas été fourni"
 msgid "No device found for this network"
 msgstr "Aucun périphérique existant pour ce réseau"
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 #, fuzzy
 msgid "No device found for this storage volume."
 msgstr "Aucun périphérique existant pour ce réseau"
@@ -916,7 +916,7 @@ msgstr "Profil %s supprimé de %s"
 msgid "Node %s renamed to %s"
 msgstr "Profil %s ajouté à %s"
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 #, fuzzy
 msgid "Node name"
 msgstr "Nom du réseau"
@@ -926,7 +926,7 @@ msgstr "Nom du réseau"
 msgid "Node: %s"
 msgstr "Nom : %s"
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr "Seul les volumes \"personnalisés\" peuvent être attaché aux conteneurs"
 
@@ -1005,7 +1005,7 @@ msgstr "Permission refusée, êtes-vous dans le groupe lxd ?"
 msgid "Pid: %d"
 msgstr "Pid : %d"
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr "Appuyer sur Entrée pour ouvrir à nouveau l'éditeur"
 
@@ -1115,7 +1115,7 @@ msgstr "Serveur distant : %s"
 msgid "Remove %s (yes/no): "
 msgstr "Supprimer %s (oui/non) : "
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1151,11 +1151,11 @@ msgstr "TAILLE"
 msgid "SNAPSHOTS"
 msgstr "INSTANTANÉS"
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr "SOURCE"
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr "ÉTAT"
 
@@ -1222,7 +1222,7 @@ msgstr "Afficher la configuration étendue"
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1277,26 +1277,31 @@ msgstr "L'arrêt du conteneur a échoué !"
 msgid "Stopping the container failed: %s"
 msgstr "L'arrêt du conteneur a échoué !"
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, fuzzy, c-format
 msgid "Storage pool %s created"
 msgstr "Le réseau %s a été créé"
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, fuzzy, c-format
 msgid "Storage pool %s deleted"
 msgstr "Le réseau %s a été supprimé"
 
+#: lxc/storage.go:505
+#, fuzzy, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr "Le réseau %s a été créé"
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr "Nom de l'ensemble de stockage"
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, fuzzy, c-format
 msgid "Storage volume %s created"
 msgstr "Profil %s créé"
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, fuzzy, c-format
 msgid "Storage volume %s deleted"
 msgstr "Profil %s supprimé"
@@ -1313,7 +1318,7 @@ msgstr "Swap (courant)"
 msgid "Swap (peak)"
 msgstr "Swap (pointe)"
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr "TYPE"
 
@@ -1355,7 +1360,7 @@ msgstr "L'image locale '%s' n'a pas été trouvée, essayer '%s:' à la place."
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr "Le pendant de `lxc pause` est `lxc start`."
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr "Le périphérique indiqué n'existe pas"
 
@@ -1440,7 +1445,7 @@ msgstr "DATE DE PUBLICATION"
 msgid "URL"
 msgstr "URL"
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr "UTILISÉ PAR"
 
@@ -2676,7 +2681,7 @@ msgstr ""
 "Exemple :\n"
 "    lxc snapshot u1 snap0"
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2689,7 +2694,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/id.po b/po/id.po
index e7b57bd27..ce3d09875 100644
--- a/po/id.po
+++ b/po/id.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -704,7 +704,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +777,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -795,7 +795,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -804,7 +804,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1093,7 +1093,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1146,26 +1146,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1187,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1218,7 +1223,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1297,7 +1302,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2076,7 +2081,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2089,7 +2094,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/it.po b/po/it.po
index 3a3bd30be..ab39edb88 100644
--- a/po/it.po
+++ b/po/it.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: 2017-08-18 14:22+0000\n"
 "Last-Translator: Alberto Donato <alberto.donato at gmail.com>\n"
 "Language-Team: Italian <https://hosted.weblate.org/projects/linux-containers/"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=2; plural=n != 1;\n"
 "X-Generator: Weblate 2.17-dev\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -50,7 +50,7 @@ msgstr ""
 "###   source: /home/chb/mnt/lxd_test/default.img\n"
 "###   zfs.pool_name: default"
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -313,7 +313,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -339,7 +339,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -410,11 +410,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr "DESCRIZIONE"
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr "DRIVER"
 
@@ -730,7 +730,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -751,7 +751,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -803,7 +803,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -821,7 +821,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -830,7 +830,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr "Aggiornamento automatico: %s"
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -908,7 +908,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -1017,7 +1017,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1051,11 +1051,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1119,7 +1119,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1172,26 +1172,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1208,7 +1213,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1244,7 +1249,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1323,7 +1328,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2102,7 +2107,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2115,7 +2120,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/ja.po b/po/ja.po
index f21046b15..ea950116a 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: LXD\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: 2017-09-28 20:29+0000\n"
 "Last-Translator: KATOH Yasufumi <karma at jazz.email.ne.jp>\n"
 "Language-Team: Japanese <https://hosted.weblate.org/projects/linux-"
@@ -19,7 +19,7 @@ msgstr ""
 "Plural-Forms: nplurals=1; plural=0;\n"
 "X-Generator: Weblate 2.17-dev\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -36,7 +36,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -294,7 +294,7 @@ msgstr "キー '%s' が設定されていないので削除できません"
 msgid "Can't unset key '%s', it's not currently set."
 msgstr "キー '%s' が指定されていないので削除できません。"
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr "コンテナ名を取得できません"
 
@@ -320,7 +320,7 @@ msgid "Config key/value to apply to the new container"
 msgstr "新しいコンテナに適用するキー/値の設定"
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr "設定の構文エラー: %s"
@@ -391,11 +391,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -713,7 +713,7 @@ msgstr "メモリ消費量:"
 msgid "Missing summary."
 msgstr "サマリーはありません。"
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr "複数のデバイスとマッチします。デバイス名を指定してください。"
 
@@ -736,7 +736,7 @@ msgid "Must supply container name for: "
 msgstr "コンテナ名を指定する必要があります: "
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -788,7 +788,7 @@ msgstr "追加すべき証明書が提供されていません"
 msgid "No device found for this network"
 msgstr "このネットワークに対するデバイスがありません"
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr "このストレージボリュームに対するデバイスがありません。"
 
@@ -806,7 +806,7 @@ msgstr "プロファイル %s が %s から削除されました"
 msgid "Node %s renamed to %s"
 msgstr "プロファイル %s が %s に追加されました"
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 #, fuzzy
 msgid "Node name"
 msgstr "ネットワーク名:"
@@ -816,7 +816,7 @@ msgstr "ネットワーク名:"
 msgid "Node: %s"
 msgstr "コンテナ名: %s"
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr "\"カスタム\" のボリュームのみがコンテナにアタッチできます。"
 
@@ -894,7 +894,7 @@ msgstr "アクセスが拒否されました。lxd グループに所属して
 msgid "Pid: %d"
 msgstr "Pid: %d"
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr "再度エディタを開くためには Enter キーを押します"
 
@@ -1003,7 +1003,7 @@ msgstr "リモート名: %s"
 msgid "Remove %s (yes/no): "
 msgstr "%s を消去しますか (yes/no): "
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1038,11 +1038,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1107,7 +1107,7 @@ msgstr "拡張した設定を表示する"
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1160,26 +1160,31 @@ msgstr "コンテナの停止に失敗しました!"
 msgid "Stopping the container failed: %s"
 msgstr "コンテナの停止に失敗しました: %s"
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr "ストレージプール %s を作成しました"
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr "ストレージプール %s を削除しました"
 
+#: lxc/storage.go:505
+#, fuzzy, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr "ストレージプール %s を作成しました"
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr "ストレージプール名"
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr "ストレージボリューム %s を作成しました"
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr "ストレージボリューム %s を削除しました"
@@ -1196,7 +1201,7 @@ msgstr "Swap (現在値)"
 msgid "Swap (peak)"
 msgstr "Swap (ピーク)"
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1235,7 +1240,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr "\"lxc pause\" の反対のコマンドは \"lxc start\" です。"
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr "指定したデバイスが存在しません"
 
@@ -1324,7 +1329,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2706,7 +2711,7 @@ msgstr ""
 "lxc snapshot u1 snap0\n"
 "    \"u1\" のスナップショットを \"snap0\" という名前で作成します。"
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 #, fuzzy
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
@@ -2720,7 +2725,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/lxd.pot b/po/lxd.pot
index 639df2a73..3e7f60ff1 100644
--- a/po/lxd.pot
+++ b/po/lxd.pot
@@ -7,7 +7,7 @@
 msgid   ""
 msgstr  "Project-Id-Version: lxd\n"
         "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-        "POT-Creation-Date: 2017-12-06 11:57+0000\n"
+        "POT-Creation-Date: 2017-12-06 11:58+0000\n"
         "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
         "Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
         "Language-Team: LANGUAGE <LL at li.org>\n"
@@ -16,7 +16,7 @@ msgstr  "Project-Id-Version: lxd\n"
         "Content-Type: text/plain; charset=CHARSET\n"
         "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid   "### This is a yaml representation of a storage pool.\n"
         "### Any line starting with a '# will be ignored.\n"
         "###\n"
@@ -32,7 +32,7 @@ msgid   "### This is a yaml representation of a storage pool.\n"
         "###   zfs.pool_name: default"
 msgstr  ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid   "### This is a yaml representation of a storage volume.\n"
         "### Any line starting with a '# will be ignored.\n"
         "###\n"
@@ -281,7 +281,7 @@ msgstr  ""
 msgid   "Can't unset key '%s', it's not currently set."
 msgstr  ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid   "Cannot provide container name to list"
 msgstr  ""
 
@@ -306,7 +306,7 @@ msgstr  ""
 msgid   "Config key/value to apply to the new container"
 msgstr  ""
 
-#: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190 lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190 lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid   "Config parsing error: %s"
 msgstr  ""
@@ -376,11 +376,11 @@ msgstr  ""
 msgid   "DATABASE"
 msgstr  ""
 
-#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525 lxc/storage.go:689 lxc/storage.go:807
+#: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525 lxc/storage.go:701 lxc/storage.go:819
 msgid   "DESCRIPTION"
 msgstr  ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid   "DRIVER"
 msgstr  ""
 
@@ -695,7 +695,7 @@ msgstr  ""
 msgid   "Missing summary."
 msgstr  ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid   "More than one device matches, specify the device name."
 msgstr  ""
 
@@ -715,7 +715,7 @@ msgstr  ""
 msgid   "Must supply container name for: "
 msgstr  ""
 
-#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573 lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid   "NAME"
 msgstr  ""
 
@@ -767,7 +767,7 @@ msgstr  ""
 msgid   "No device found for this network"
 msgstr  ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid   "No device found for this storage volume."
 msgstr  ""
 
@@ -785,7 +785,7 @@ msgstr  ""
 msgid   "Node %s renamed to %s"
 msgstr  ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid   "Node name"
 msgstr  ""
 
@@ -794,7 +794,7 @@ msgstr  ""
 msgid   "Node: %s"
 msgstr  ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid   "Only \"custom\" volumes can be attached to containers."
 msgstr  ""
 
@@ -872,7 +872,7 @@ msgstr  ""
 msgid   "Pid: %d"
 msgstr  ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid   "Press enter to open the editor again"
 msgstr  ""
 
@@ -980,7 +980,7 @@ msgstr  ""
 msgid   "Remove %s (yes/no): "
 msgstr  ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid   "Renamed storage volume from \"%s\" to \"%s\""
 msgstr  ""
@@ -1014,11 +1014,11 @@ msgstr  ""
 msgid   "SNAPSHOTS"
 msgstr  ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid   "SOURCE"
 msgstr  ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid   "STATE"
 msgstr  ""
 
@@ -1082,7 +1082,7 @@ msgstr  ""
 msgid   "Show the resources available to the server"
 msgstr  ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid   "Show the resources available to the storage pool"
 msgstr  ""
 
@@ -1135,26 +1135,31 @@ msgstr  ""
 msgid   "Stopping the container failed: %s"
 msgstr  ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid   "Storage pool %s created"
 msgstr  ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid   "Storage pool %s deleted"
 msgstr  ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid   "Storage pool %s pending on node %s"
+msgstr  ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid   "Storage pool name"
 msgstr  ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid   "Storage volume %s created"
 msgstr  ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid   "Storage volume %s deleted"
 msgstr  ""
@@ -1171,7 +1176,7 @@ msgstr  ""
 msgid   "Swap (peak)"
 msgstr  ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid   "TYPE"
 msgstr  ""
 
@@ -1204,7 +1209,7 @@ msgstr  ""
 msgid   "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr  ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid   "The specified device doesn't exist"
 msgstr  ""
 
@@ -1282,7 +1287,7 @@ msgstr  ""
 msgid   "URL"
 msgstr  ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid   "USED BY"
 msgstr  ""
 
@@ -1982,7 +1987,7 @@ msgid   "Usage: lxc snapshot [<remote>:]<container> <snapshot name> [--stateful]
         "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr  ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid   "Usage: lxc storage <subcommand> [options]\n"
         "\n"
         "Manage storage pools and volumes.\n"
@@ -1994,7 +1999,7 @@ msgid   "Usage: lxc storage <subcommand> [options]\n"
         "lxc storage show [<remote>:]<pool> [--resources]\n"
         "    Show details of a storage pool.\n"
         "\n"
-        "lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+        "lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target <node>]\n"
         "    Create a storage pool.\n"
         "\n"
         "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/nb_NO.po b/po/nb_NO.po
index a0d8b2a87..d914df8b3 100644
--- a/po/nb_NO.po
+++ b/po/nb_NO.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -704,7 +704,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +777,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -795,7 +795,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -804,7 +804,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1093,7 +1093,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1146,26 +1146,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1187,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1218,7 +1223,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1297,7 +1302,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2076,7 +2081,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2089,7 +2094,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/nl.po b/po/nl.po
index da4091f7d..6f16fa14f 100644
--- a/po/nl.po
+++ b/po/nl.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -704,7 +704,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +777,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -795,7 +795,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -804,7 +804,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1093,7 +1093,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1146,26 +1146,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1187,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1218,7 +1223,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1297,7 +1302,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2076,7 +2081,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2089,7 +2094,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/pt_BR.po b/po/pt_BR.po
index 7548e6757..659d5057e 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -704,7 +704,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +777,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -795,7 +795,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -804,7 +804,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1093,7 +1093,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1146,26 +1146,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1187,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1218,7 +1223,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1297,7 +1302,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2076,7 +2081,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2089,7 +2094,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/ru.po b/po/ru.po
index f40ef67b8..6cb2c6454 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: 2017-09-05 16:48+0000\n"
 "Last-Translator: Ilya Yakimavets <ilya.yakimavets at backend.expert>\n"
 "Language-Team: Russian <https://hosted.weblate.org/projects/linux-containers/"
@@ -20,7 +20,7 @@ msgstr ""
 "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;\n"
 "X-Generator: Weblate 2.17-dev\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -50,7 +50,7 @@ msgstr ""
 "###   source: /home/chb/mnt/lxd_test/default.img\n"
 "###   zfs.pool_name: default"
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -376,7 +376,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr "Невозможно добавить имя контейнера в список"
 
@@ -402,7 +402,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -473,11 +473,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -794,7 +794,7 @@ msgstr " Использование памяти:"
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -815,7 +815,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -868,7 +868,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -886,7 +886,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -895,7 +895,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr "Авто-обновление: %s"
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -973,7 +973,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -1082,7 +1082,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1116,11 +1116,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1184,7 +1184,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1237,26 +1237,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr "Невозможно добавить имя контейнера в список"
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1273,7 +1278,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1309,7 +1314,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1388,7 +1393,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2179,7 +2184,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2192,7 +2197,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/sr.po b/po/sr.po
index cd56a45dc..241fb678a 100644
--- a/po/sr.po
+++ b/po/sr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -704,7 +704,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +777,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -795,7 +795,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -804,7 +804,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1093,7 +1093,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1146,26 +1146,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1187,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1218,7 +1223,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1297,7 +1302,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2076,7 +2081,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2089,7 +2094,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/sv.po b/po/sv.po
index a0e2c97c7..6385d8ef3 100644
--- a/po/sv.po
+++ b/po/sv.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -704,7 +704,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +777,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -795,7 +795,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -804,7 +804,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1093,7 +1093,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1146,26 +1146,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1187,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1218,7 +1223,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1297,7 +1302,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2076,7 +2081,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2089,7 +2094,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/tr.po b/po/tr.po
index 7c47f0d0e..1d051a6e4 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -704,7 +704,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +777,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -795,7 +795,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -804,7 +804,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1093,7 +1093,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1146,26 +1146,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1187,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1218,7 +1223,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1297,7 +1302,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2076,7 +2081,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2089,7 +2094,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/zh.po b/po/zh.po
index df3af9211..d51099070 100644
--- a/po/zh.po
+++ b/po/zh.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -704,7 +704,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +777,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -795,7 +795,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -804,7 +804,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1093,7 +1093,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1146,26 +1146,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1187,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1218,7 +1223,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1297,7 +1302,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2076,7 +2081,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2089,7 +2094,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/po/zh_Hans.po b/po/zh_Hans.po
index 544abd00f..6ed4ad2cf 100644
--- a/po/zh_Hans.po
+++ b/po/zh_Hans.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: lxd\n"
 "Report-Msgid-Bugs-To: lxc-devel at lists.linuxcontainers.org\n"
-"POT-Creation-Date: 2017-12-06 11:48+0000\n"
+"POT-Creation-Date: 2017-12-06 11:58+0000\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -16,7 +16,7 @@ msgstr ""
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
-#: lxc/storage.go:33
+#: lxc/storage.go:34
 msgid ""
 "### This is a yaml representation of a storage pool.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -33,7 +33,7 @@ msgid ""
 "###   zfs.pool_name: default"
 msgstr ""
 
-#: lxc/storage.go:50
+#: lxc/storage.go:51
 msgid ""
 "### This is a yaml representation of a storage volume.\n"
 "### Any line starting with a '# will be ignored.\n"
@@ -288,7 +288,7 @@ msgstr ""
 msgid "Can't unset key '%s', it's not currently set."
 msgstr ""
 
-#: lxc/profile.go:546 lxc/storage.go:653
+#: lxc/profile.go:546 lxc/storage.go:665
 msgid "Cannot provide container name to list"
 msgstr ""
 
@@ -314,7 +314,7 @@ msgid "Config key/value to apply to the new container"
 msgstr ""
 
 #: lxc/config.go:816 lxc/config.go:881 lxc/config.go:1331 lxc/image.go:1190
-#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:604 lxc/storage.go:993
+#: lxc/network.go:426 lxc/profile.go:275 lxc/storage.go:616 lxc/storage.go:1005
 #, c-format
 msgid "Config parsing error: %s"
 msgstr ""
@@ -385,11 +385,11 @@ msgid "DATABASE"
 msgstr ""
 
 #: lxc/image.go:234 lxc/image.go:1137 lxc/list.go:467 lxc/network.go:525
-#: lxc/storage.go:689 lxc/storage.go:807
+#: lxc/storage.go:701 lxc/storage.go:819
 msgid "DESCRIPTION"
 msgstr ""
 
-#: lxc/storage.go:690
+#: lxc/storage.go:702
 msgid "DRIVER"
 msgstr ""
 
@@ -704,7 +704,7 @@ msgstr ""
 msgid "Missing summary."
 msgstr ""
 
-#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:402 lxc/storage.go:522
+#: lxc/network.go:284 lxc/network.go:337 lxc/storage.go:404 lxc/storage.go:534
 msgid "More than one device matches, specify the device name."
 msgstr ""
 
@@ -725,7 +725,7 @@ msgid "Must supply container name for: "
 msgstr ""
 
 #: lxc/cluster.go:181 lxc/list.go:469 lxc/network.go:522 lxc/profile.go:573
-#: lxc/remote.go:409 lxc/storage.go:688 lxc/storage.go:806
+#: lxc/remote.go:409 lxc/storage.go:700 lxc/storage.go:818
 msgid "NAME"
 msgstr ""
 
@@ -777,7 +777,7 @@ msgstr ""
 msgid "No device found for this network"
 msgstr ""
 
-#: lxc/storage.go:411 lxc/storage.go:531
+#: lxc/storage.go:413 lxc/storage.go:543
 msgid "No device found for this storage volume."
 msgstr ""
 
@@ -795,7 +795,7 @@ msgstr ""
 msgid "Node %s renamed to %s"
 msgstr ""
 
-#: lxc/init.go:149
+#: lxc/init.go:149 lxc/storage.go:153
 msgid "Node name"
 msgstr ""
 
@@ -804,7 +804,7 @@ msgstr ""
 msgid "Node: %s"
 msgstr ""
 
-#: lxc/storage.go:355 lxc/storage.go:448
+#: lxc/storage.go:357 lxc/storage.go:450
 msgid "Only \"custom\" volumes can be attached to containers."
 msgstr ""
 
@@ -882,7 +882,7 @@ msgstr ""
 msgid "Pid: %d"
 msgstr ""
 
-#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:605 lxc/storage.go:994
+#: lxc/network.go:427 lxc/profile.go:276 lxc/storage.go:617 lxc/storage.go:1006
 msgid "Press enter to open the editor again"
 msgstr ""
 
@@ -991,7 +991,7 @@ msgstr ""
 msgid "Remove %s (yes/no): "
 msgstr ""
 
-#: lxc/storage.go:1025
+#: lxc/storage.go:1037
 #, c-format
 msgid "Renamed storage volume from \"%s\" to \"%s\""
 msgstr ""
@@ -1025,11 +1025,11 @@ msgstr ""
 msgid "SNAPSHOTS"
 msgstr ""
 
-#: lxc/storage.go:695
+#: lxc/storage.go:707
 msgid "SOURCE"
 msgstr ""
 
-#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:693
+#: lxc/cluster.go:184 lxc/list.go:473 lxc/storage.go:705
 msgid "STATE"
 msgstr ""
 
@@ -1093,7 +1093,7 @@ msgstr ""
 msgid "Show the resources available to the server"
 msgstr ""
 
-#: lxc/storage.go:151
+#: lxc/storage.go:152
 msgid "Show the resources available to the storage pool"
 msgstr ""
 
@@ -1146,26 +1146,31 @@ msgstr ""
 msgid "Stopping the container failed: %s"
 msgstr ""
 
-#: lxc/storage.go:496
+#: lxc/storage.go:507
 #, c-format
 msgid "Storage pool %s created"
 msgstr ""
 
-#: lxc/storage.go:555
+#: lxc/storage.go:567
 #, c-format
 msgid "Storage pool %s deleted"
 msgstr ""
 
+#: lxc/storage.go:505
+#, c-format
+msgid "Storage pool %s pending on node %s"
+msgstr ""
+
 #: lxc/init.go:146 lxc/init.go:147
 msgid "Storage pool name"
 msgstr ""
 
-#: lxc/storage.go:840
+#: lxc/storage.go:852
 #, c-format
 msgid "Storage volume %s created"
 msgstr ""
 
-#: lxc/storage.go:855
+#: lxc/storage.go:867
 #, c-format
 msgid "Storage volume %s deleted"
 msgstr ""
@@ -1182,7 +1187,7 @@ msgstr ""
 msgid "Swap (peak)"
 msgstr ""
 
-#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:805
+#: lxc/list.go:474 lxc/network.go:523 lxc/operation.go:153 lxc/storage.go:817
 msgid "TYPE"
 msgstr ""
 
@@ -1218,7 +1223,7 @@ msgstr ""
 msgid "The opposite of \"lxc pause\" is \"lxc start\"."
 msgstr ""
 
-#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:416 lxc/storage.go:536
+#: lxc/network.go:298 lxc/network.go:351 lxc/storage.go:418 lxc/storage.go:548
 msgid "The specified device doesn't exist"
 msgstr ""
 
@@ -1297,7 +1302,7 @@ msgstr ""
 msgid "URL"
 msgstr ""
 
-#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:697 lxc/storage.go:808
+#: lxc/network.go:526 lxc/profile.go:574 lxc/storage.go:709 lxc/storage.go:820
 msgid "USED BY"
 msgstr ""
 
@@ -2076,7 +2081,7 @@ msgid ""
 "    Create a snapshot of \"u1\" called \"snap0\"."
 msgstr ""
 
-#: lxc/storage.go:64
+#: lxc/storage.go:65
 msgid ""
 "Usage: lxc storage <subcommand> [options]\n"
 "\n"
@@ -2089,7 +2094,8 @@ msgid ""
 "lxc storage show [<remote>:]<pool> [--resources]\n"
 "    Show details of a storage pool.\n"
 "\n"
-"lxc storage create [<remote>:]<pool> <driver> [key=value]...\n"
+"lxc storage create [<remote>:]<pool> <driver> [key=value]... [--target "
+"<node>]\n"
 "    Create a storage pool.\n"
 "\n"
 "lxc storage get [<remote>:]<pool> <key>\n"
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 3f9b3d167..9a019bb45 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -195,6 +195,14 @@ test_clustering_storage() {
   # The state of the preseeded storage pool is still CREATED
   LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep data | grep -q CREATED
 
+  # Trying to pass config values other than 'source' results in an error
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir source=/foo size=123 --target node1
+
+  # Create a new storage pool
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node1
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node2
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q PENDING
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From 71d52a5777ea9ad5714d966d1d28d2b1408fca04 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 7 Dec 2017 09:23:15 +0000
Subject: [PATCH 110/116] Add client.ClusterNodeName() returning the name of
 the remote node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go    |  3 ++-
 client/lxd_server.go    |  5 +++++
 lxd/api_1.0.go          | 13 ++++++++++++-
 lxd/api_cluster_test.go |  1 +
 lxd/db/node.go          | 17 +++++++++++++++++
 lxd/db/node_test.go     | 11 +++++++++++
 shared/api/server.go    |  3 ++-
 7 files changed, 50 insertions(+), 3 deletions(-)

diff --git a/client/interfaces.go b/client/interfaces.go
index 7a56436bf..8de9a2190 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -48,7 +48,8 @@ type ContainerServer interface {
 	HasExtension(extension string) (exists bool)
 	RequireAuthenticated(authenticated bool)
 	IsClustered() (clustered bool)
-	ClusterTargetNode(name string) ContainerServer
+	ClusterTargetNode(name string) (client ContainerServer)
+	ClusterNodeName() (name string)
 
 	// Certificate functions
 	GetCertificateFingerprints() (fingerprints []string, err error)
diff --git a/client/lxd_server.go b/client/lxd_server.go
index 1c4ef5959..af594bca3 100644
--- a/client/lxd_server.go
+++ b/client/lxd_server.go
@@ -100,3 +100,8 @@ func (r *ProtocolLXD) ClusterTargetNode(name string) ContainerServer {
 		targetNode:           name,
 	}
 }
+
+// ClusterNodeName returns the name of the node this client is pointing to.
+func (r *ProtocolLXD) ClusterNodeName() string {
+	return r.server.Environment.NodeName
+}
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 5209a5cb7..1ff4a5b79 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -122,6 +122,15 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		return SmartError(err)
 	}
 
+	nodeName := ""
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		nodeName, err = tx.NodeName()
+		return err
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+
 	certificate := string(d.endpoints.NetworkPublicKey())
 	var certificateFingerprint string
 	if certificate != "" {
@@ -154,7 +163,9 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		Server:                 "lxd",
 		ServerPid:              os.Getpid(),
 		ServerVersion:          version.Version,
-		Clustered:              clustered}
+		Clustered:              clustered,
+		NodeName:               nodeName,
+	}
 
 	drivers := readStoragePoolDriversCache()
 	for _, driver := range drivers {
diff --git a/lxd/api_cluster_test.go b/lxd/api_cluster_test.go
index e4f187478..315f13b7c 100644
--- a/lxd/api_cluster_test.go
+++ b/lxd/api_cluster_test.go
@@ -30,6 +30,7 @@ func TestCluster_Bootstrap(t *testing.T) {
 	_, _, err = client.GetServer()
 	require.NoError(t, err)
 	assert.True(t, client.IsClustered())
+	assert.Equal(t, "buzz", client.ClusterNodeName())
 }
 
 // A LXD node which is already configured for networking can join an existing
diff --git a/lxd/db/node.go b/lxd/db/node.go
index 172d79448..0fac94c64 100644
--- a/lxd/db/node.go
+++ b/lxd/db/node.go
@@ -61,6 +61,23 @@ func (c *ClusterTx) NodeByName(name string) (NodeInfo, error) {
 	}
 }
 
+// NodeName returns the name of the node this method is invoked on.
+func (c *ClusterTx) NodeName() (string, error) {
+	stmt := "SELECT name FROM nodes WHERE id=?"
+	names, err := query.SelectStrings(c.tx, stmt, c.nodeID)
+	if err != nil {
+		return "", err
+	}
+	switch len(names) {
+	case 0:
+		return "", nil
+	case 1:
+		return names[0], nil
+	default:
+		return "", fmt.Errorf("inconsistency: non-unique node ID")
+	}
+}
+
 // Nodes returns all LXD nodes part of the cluster.
 //
 // If this LXD instance is not clustered, a list with a single node whose
diff --git a/lxd/db/node_test.go b/lxd/db/node_test.go
index d95363746..439240b14 100644
--- a/lxd/db/node_test.go
+++ b/lxd/db/node_test.go
@@ -53,6 +53,17 @@ func TestNodesCount(t *testing.T) {
 	assert.Equal(t, 2, count)
 }
 
+func TestNodeName(t *testing.T) {
+	tx, cleanup := db.NewTestClusterTx(t)
+	defer cleanup()
+
+	name, err := tx.NodeName()
+	require.NoError(t, err)
+
+	// The default node 1 has a conventional name 'none'.
+	assert.Equal(t, "none", name)
+}
+
 // Rename a node
 func TestNodeRename(t *testing.T) {
 	tx, cleanup := db.NewTestClusterTx(t)
diff --git a/shared/api/server.go b/shared/api/server.go
index 570041533..b0989571b 100644
--- a/shared/api/server.go
+++ b/shared/api/server.go
@@ -18,7 +18,8 @@ type ServerEnvironment struct {
 	StorageVersion         string   `json:"storage_version" yaml:"storage_version"`
 
 	// API extension: clustering
-	Clustered bool `json:"clustered" yaml:"clustered"`
+	Clustered bool   `json:"clustered" yaml:"clustered"`
+	NodeName  string `json:"node_name" yaml:"node_name"`
 }
 
 // ServerPut represents the modifiable fields of a LXD server configuration

From 4ca6990a6e57f24a0cd0b7df732d0aa5326be29f Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 7 Dec 2017 11:17:27 +0000
Subject: [PATCH 111/116] Create a storage pool across all nodes of a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/api_storage_pools_test.go |  38 ++++++++++++++
 lxd/storage_pools.go          | 112 +++++++++++++++++++++++++++++++++++++++---
 lxd/storage_pools_utils.go    |   7 +++
 test/suites/clustering.sh     |   9 +++-
 4 files changed, 157 insertions(+), 9 deletions(-)

diff --git a/lxd/api_storage_pools_test.go b/lxd/api_storage_pools_test.go
index 8c572f487..c4cf3adbd 100644
--- a/lxd/api_storage_pools_test.go
+++ b/lxd/api_storage_pools_test.go
@@ -38,3 +38,41 @@ func TestStoragePoolsCreate_TargetNode(t *testing.T) {
 
 	assert.Equal(t, "PENDING", pool.State)
 }
+
+// An error is returned when trying to create a new storage pool in a cluster
+// where the pool was not defined on all nodes.
+func TestStoragePoolsCreate_MissingNodes(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping storage-pools targetNode test in short mode.")
+	}
+	daemons, cleanup := newDaemons(t, 2)
+	defer cleanup()
+
+	f := clusterFixture{t: t}
+	f.FormCluster(daemons)
+
+	// Define the pool on rusp-0.
+	daemon := daemons[0]
+	client := f.ClientUnix(daemon).ClusterTargetNode("rusp-0")
+
+	poolPost := api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	poolPost.Config = map[string]string{
+		"source": "",
+	}
+
+	err := client.CreateStoragePool(poolPost)
+	require.NoError(t, err)
+
+	// Trying to create the pool now results in an error, since it's not
+	// defined on all nodes.
+	poolPost = api.StoragePoolsPost{
+		Name:   "mypool",
+		Driver: "dir",
+	}
+	client = f.ClientUnix(daemon)
+	err = client.CreateStoragePool(poolPost)
+	require.EqualError(t, err, "Pool not defined on nodes: buzz")
+}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index 4f4bafdc9..baba86839 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -8,6 +8,7 @@ import (
 	"strings"
 
 	"github.com/gorilla/mux"
+	lxd "github.com/lxc/lxd/client"
 	"github.com/lxc/lxd/lxd/cluster"
 	"github.com/lxc/lxd/lxd/db"
 	"github.com/lxc/lxd/lxd/util"
@@ -81,6 +82,18 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 	url := fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, req.Name)
 	response := SyncResponseLocation(true, nil, url)
 
+	if isClusterNotification(r) {
+		// This is an internal request from another cluster node,
+		// asking this node to actually create the pool, after it has
+		// been previously defined.
+		err = doStoragePoolCreateInternal(
+			d.State(), req.Name, req.Description, req.Driver, req.Config)
+		if err != nil {
+			return SmartError(err)
+		}
+		return response
+	}
+
 	targetNode := r.FormValue("targetNode")
 	if targetNode == "" {
 		count, err := cluster.Count(d.State())
@@ -94,16 +107,16 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 			// pool immediately.
 			err = storagePoolCreateInternal(
 				d.State(), req.Name, req.Description, req.Driver, req.Config)
-			if err != nil {
-				return InternalError(err)
-			}
-			return response
+		} else {
+			// No targetNode was specified and we're clustered, so finalize the
+			// config in the db and actually create the pool on all nodes.
+			err = storagePoolsPostCluster(d, req)
+		}
+		if err != nil {
+			return InternalError(err)
 		}
+		return response
 
-		// No targetNode was specified and we're clustered. Check that
-		// the storage pool has been defined on all nodes and, if so,
-		// actually create it on all of them.
-		panic("TODO")
 	}
 
 	// A targetNode was specified, let's just define the node's storage
@@ -124,6 +137,89 @@ func storagePoolsPost(d *Daemon, r *http.Request) Response {
 	return response
 }
 
+func storagePoolsPostCluster(d *Daemon, req api.StoragePoolsPost) error {
+	// Check that no 'source' config key has been defined, since
+	// that's node-specific.
+	for key := range req.Config {
+		if key == "source" {
+			return fmt.Errorf("Config key 'source' is node-specific")
+		}
+	}
+
+	// Check that the pool is properly defined, fetch the node-specific
+	// configs and insert the global config.
+	var configs map[string]map[string]string
+	var nodeName string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		// Check that the pool was defined at all.
+		poolID, err := tx.StoragePoolID(req.Name)
+		if err != nil {
+			return err
+		}
+
+		// Fetch the node-specific configs.
+		configs, err = tx.StoragePoolNodeConfigs(poolID)
+		if err != nil {
+			return err
+		}
+
+		// Take note of the name of this node
+		nodeName, err = tx.NodeName()
+		if err != nil {
+			return err
+		}
+
+		// Insert the global config keys.
+		return tx.StoragePoolConfigAdd(poolID, 0, req.Config)
+	})
+	if err != nil {
+		return err
+	}
+
+	// Create the pool on this node.
+	nodeReq := req
+	for key, value := range configs[nodeName] {
+		nodeReq.Config[key] = value
+	}
+	err = doStoragePoolCreateInternal(
+		d.State(), req.Name, req.Description, req.Driver, req.Config)
+	if err != nil {
+		return err
+	}
+
+	// Notify all other nodes to create the pool.
+	notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
+	if err != nil {
+		return err
+	}
+	notifyErr := notifier(func(client lxd.ContainerServer) error {
+		_, _, err := client.GetServer()
+		if err != nil {
+			return err
+		}
+		nodeReq := req
+		for key, value := range configs[client.ClusterNodeName()] {
+			nodeReq.Config[key] = value
+		}
+		return client.CreateStoragePool(nodeReq)
+	})
+
+	errored := notifyErr != nil
+
+	// Finally update the storage pool state.
+	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		if errored {
+			return tx.StoragePoolErrored(req.Name)
+		}
+		return tx.StoragePoolCreated(req.Name)
+	})
+	if err != nil {
+		return err
+	}
+
+	return notifyErr
+}
+
 var storagePoolsCmd = Command{name: "storage-pools", get: storagePoolsGet, post: storagePoolsPost}
 
 // /1.0/storage-pools/{name}
diff --git a/lxd/storage_pools_utils.go b/lxd/storage_pools_utils.go
index e520a96e6..50bb091f1 100644
--- a/lxd/storage_pools_utils.go
+++ b/lxd/storage_pools_utils.go
@@ -211,7 +211,14 @@ func storagePoolCreateInternal(state *state.State, poolName, poolDescription str
 		}
 		dbStoragePoolDeleteAndUpdateCache(state.Cluster, poolName)
 	}()
+	err = doStoragePoolCreateInternal(state, poolName, poolDescription, driver, config)
+	tryUndo = err != nil
+	return err
+}
 
+// This performs all non-db related work needed to create the pool.
+func doStoragePoolCreateInternal(state *state.State, poolName, poolDescription string, driver string, config map[string]string) error {
+	tryUndo := true
 	s, err := storagePoolInit(state, poolName)
 	if err != nil {
 		return err
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 9a019bb45..08158711c 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -198,11 +198,18 @@ test_clustering_storage() {
   # Trying to pass config values other than 'source' results in an error
   ! LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir source=/foo size=123 --target node1
 
-  # Create a new storage pool
+  # Define storage pools on the two nodes
   LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node1
   LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node2
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q PENDING
 
+  # The source config key is not legal for the final pool creation
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir source=/foo
+
+  # Create the storage pool
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 dir
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q CREATED
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From e480b2acd5579a210e48df05d5c3deaede7acdcc Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Thu, 7 Dec 2017 13:29:14 +0000
Subject: [PATCH 112/116] Compare global pools/networks configs when a node
 requests to join

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/interfaces.go      |   2 +-
 client/lxd_cluster.go     |   4 +-
 lxd/api_cluster.go        | 108 +++++++++++++++++++++++++++++++++++++++++++++-
 lxd/cluster/membership.go |  14 +++---
 lxd/db/migration.go       |  10 +++++
 lxd/db/networks.go        |  10 ++++-
 lxd/main_init.go          |  17 +++-----
 lxd/main_init_test.go     |   7 +--
 lxd/util/config.go        |  16 +++++++
 shared/api/cluster.go     |  18 ++++----
 10 files changed, 171 insertions(+), 35 deletions(-)
 create mode 100644 lxd/util/config.go

diff --git a/client/interfaces.go b/client/interfaces.go
index 8de9a2190..773eb5ac4 100644
--- a/client/interfaces.go
+++ b/client/interfaces.go
@@ -166,7 +166,7 @@ type ContainerServer interface {
 	// Cluster functions ("cluster" API extensions)
 	GetCluster(password string) (cluster *api.Cluster, err error)
 	BootstrapCluster(name string) (op *Operation, err error)
-	AcceptNode(targetPassword, name, address string, schema, api int) (info *api.ClusterNodeAccepted, err error)
+	AcceptNode(targetPassword, name, address string, schema, api int, pools []api.StoragePool, networks []api.Network) (info *api.ClusterNodeAccepted, err error)
 	JoinCluster(targetAddress, targetPassword, targetCert, name string) (op *Operation, err error)
 	LeaveCluster(name string, force bool) (err error)
 	GetNodes() (nodes []api.Node, err error)
diff --git a/client/lxd_cluster.go b/client/lxd_cluster.go
index 93e9a6d6b..50ce1e3da 100644
--- a/client/lxd_cluster.go
+++ b/client/lxd_cluster.go
@@ -36,13 +36,15 @@ func (r *ProtocolLXD) BootstrapCluster(name string) (*Operation, error) {
 }
 
 // AcceptNode requests to accept a new node into the cluster.
-func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, apiExt int) (*api.ClusterNodeAccepted, error) {
+func (r *ProtocolLXD) AcceptNode(targetPassword, name, address string, schema, apiExt int, pools []api.StoragePool, networks []api.Network) (*api.ClusterNodeAccepted, error) {
 	cluster := api.ClusterPost{
 		Name:           name,
 		Address:        address,
 		Schema:         schema,
 		API:            apiExt,
 		TargetPassword: targetPassword,
+		StoragePools:   pools,
+		Networks:       networks,
 	}
 	info := &api.ClusterNodeAccepted{}
 	_, err := r.queryStruct("POST", "/cluster/nodes", cluster, "", &info)
diff --git a/lxd/api_cluster.go b/lxd/api_cluster.go
index 1b1ca19ce..42a82d02f 100644
--- a/lxd/api_cluster.go
+++ b/lxd/api_cluster.go
@@ -195,6 +195,18 @@ func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
 	if util.PasswordCheck(secret, req.TargetPassword) != nil {
 		return Forbidden
 	}
+
+	// Check that the pools and networks provided by the joining node have
+	// configs that match the cluster ones.
+	err = clusterCheckStoragePoolsMatch(d.cluster, req.StoragePools)
+	if err != nil {
+		return SmartError(err)
+	}
+	err = clusterCheckNetworksMatch(d.cluster, req.Networks)
+	if err != nil {
+		return SmartError(err)
+	}
+
 	nodes, err := cluster.Accept(d.State(), d.gateway, req.Name, req.Address, req.Schema, req.API)
 	if err != nil {
 		return BadRequest(err)
@@ -210,6 +222,71 @@ func clusterNodesPostAccept(d *Daemon, req api.ClusterPost) Response {
 	return SyncResponse(true, accepted)
 }
 
+func clusterCheckStoragePoolsMatch(cluster *db.Cluster, reqPools []api.StoragePool) error {
+	poolNames, err := cluster.StoragePools()
+	if err != nil && err != db.NoSuchObjectError {
+		return err
+	}
+	for _, name := range poolNames {
+		found := false
+		for _, reqPool := range reqPools {
+			if reqPool.Name != name {
+				continue
+			}
+			found = true
+			_, pool, err := cluster.StoragePoolGet(name)
+			if err != nil {
+				return err
+			}
+			if pool.Driver != reqPool.Driver {
+				return fmt.Errorf("Mismatching driver for storage pool %s", name)
+			}
+			// Exclude the "source" key, which is node-specific.
+			delete(pool.Config, "source")
+			delete(reqPool.Config, "source")
+			if !util.CompareConfigs(pool.Config, reqPool.Config) {
+				return fmt.Errorf("Mismatching config for storage pool %s", name)
+			}
+			break
+		}
+		if !found {
+			return fmt.Errorf("Missing storage pool %s", name)
+		}
+	}
+	return nil
+}
+
+func clusterCheckNetworksMatch(cluster *db.Cluster, reqNetworks []api.Network) error {
+	networkNames, err := cluster.Networks()
+	if err != nil && err != db.NoSuchObjectError {
+		return err
+	}
+	for _, name := range networkNames {
+		found := false
+		for _, reqNetwork := range reqNetworks {
+			if reqNetwork.Name != name {
+				continue
+			}
+			found = true
+			_, network, err := cluster.NetworkGet(name)
+			if err != nil {
+				return err
+			}
+			// Exclude the "bridge.external_interfaces" key, which is node-specific.
+			delete(network.Config, "bridge.external_interfaces")
+			delete(reqNetwork.Config, "bridge.external_interfaces")
+			if !util.CompareConfigs(network.Config, reqNetwork.Config) {
+				return fmt.Errorf("Mismatching config for network %s", name)
+			}
+			break
+		}
+		if !found {
+			return fmt.Errorf("Missing network %s", name)
+		}
+	}
+	return nil
+}
+
 func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
+	// Make sure basic pre-conditions are met.
 	if len(req.TargetCert) == 0 {
@@ -217,12 +294,39 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 	}
 	address, err := node.HTTPSAddress(d.db)
 	if err != nil {
-		return InternalError(err)
+		return SmartError(err)
 	}
 	if address == "" {
 		return BadRequest(fmt.Errorf("No core.https_address config key is set on this node"))
 	}
 
+	// Get all defined storage pools and networks, so they can be compared
+	// to the ones in the cluster.
+	pools := []api.StoragePool{}
+	poolNames, err := d.cluster.StoragePools()
+	if err != nil && err != db.NoSuchObjectError {
+		return SmartError(err)
+	}
+	for _, name := range poolNames {
+		_, pool, err := d.cluster.StoragePoolGet(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		pools = append(pools, *pool)
+	}
+	networks := []api.Network{}
+	networkNames, err := d.cluster.Networks()
+	if err != nil && err != db.NoSuchObjectError {
+		return SmartError(err)
+	}
+	for _, name := range networkNames {
+		_, network, err := d.cluster.NetworkGet(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		networks = append(networks, *network)
+	}
+
 	// Client parameters to connect to the target cluster node.
 	args := &lxd.ConnectionArgs{
 		TLSServerCert: string(req.TargetCert),
@@ -239,7 +343,7 @@ func clusterNodesPostJoin(d *Daemon, req api.ClusterPost) Response {
 		}
 		info, err := client.AcceptNode(
 			req.TargetPassword, req.Name, address, cluster.SchemaVersion,
-			len(version.APIExtensions))
+			len(version.APIExtensions), pools, networks)
 		if err != nil {
 			return errors.Wrap(err, "failed to request to add node")
 		}
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 7f09f68d4..32a8ad007 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -227,11 +227,11 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 		return err
 	}
 
-	// Get the local config keys for the cluster networks. It assumes that
-	// the local storage pools and networks match the cluster networks, if
-	// not an error will be returned. Also get any outstanding operation,
-	// typically there will be just one, created by the POST /cluster/nodes
-	// request which triggered this code.
+	// Get the local config keys for the cluster pools and networks. It
+	// assumes that the local storage pools and networks match the cluster
+	// networks, if not an error will be returned. Also get any outstanding
+	// operation, typically there will be just one, created by the POST
+	// /cluster/nodes request which triggered this code.
 	var pools map[string]map[string]string
 	var networks map[string]map[string]string
 	var operations []string
@@ -341,6 +341,10 @@ func Join(state *state.State, gateway *Gateway, cert *shared.CertInfo, name stri
 			if !ok {
 				return fmt.Errorf("joining node has no config for network %s", name)
 			}
+			// We only need to add the bridge.external_interfaces
+			// key, since the other keys are global and are already
+			// there.
+			config = map[string]string{"bridge.external_interfaces": config["bridge.external_interfaces"]}
 			err := tx.NetworkConfigAdd(id, node.ID, config)
 			if err != nil {
 				return errors.Wrap(err, "failed to add joining node's network config")
diff --git a/lxd/db/migration.go b/lxd/db/migration.go
index 4a424bf7e..4be14d2b0 100644
--- a/lxd/db/migration.go
+++ b/lxd/db/migration.go
@@ -98,6 +98,16 @@ func (c *Cluster) ImportPreClusteringData(dump *Dump) error {
 			case "containers":
 				fallthrough
 			case "networks_config":
+				// The "bridge.external_interfaces" config key
+				// is the only one which is not global to the
+				// cluster, so all other keys will have a NULL
+				// node_id.
+				for i, column := range columns {
+					if column == "key" && row[i] != "bridge.external_interfaces" {
+						nullNodeID = true
+						break
+					}
+				}
 				appendNodeID()
 			case "storage_pools_config":
 				// The "source" config key is the only one
diff --git a/lxd/db/networks.go b/lxd/db/networks.go
index 19d60f35a..825d918c2 100644
--- a/lxd/db/networks.go
+++ b/lxd/db/networks.go
@@ -156,7 +156,7 @@ func (c *Cluster) NetworkConfigGet(id int64) (map[string]string, error) {
             key, value
         FROM networks_config
 		WHERE network_id=?
-                AND node_id=?`
+                AND (node_id=? OR node_id IS NULL)`
 	inargs := []interface{}{id, c.nodeID}
 	outfmt := []interface{}{key, value}
 	results, err := queryScan(c.db, query, inargs, outfmt)
@@ -274,8 +274,14 @@ func networkConfigAdd(tx *sql.Tx, networkID, nodeID int64, config map[string]str
 		if v == "" {
 			continue
 		}
+		var nodeIDValue interface{}
+		if k != "bridge.external_interfaces" {
+			nodeIDValue = nil
+		} else {
+			nodeIDValue = nodeID
+		}
 
-		_, err = stmt.Exec(networkID, nodeID, k, v)
+		_, err = stmt.Exec(networkID, nodeIDValue, k, v)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/main_init.go b/lxd/main_init.go
index a48dafa71..d1fbf5f90 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -6,7 +6,6 @@ import (
 	"net"
 	"os"
 	"os/exec"
-	"sort"
 	"strconv"
 	"strings"
 	"syscall"
@@ -870,17 +869,13 @@ func (cmd *CmdInit) askClusteringNetworks(cluster *api.Cluster) ([]api.NetworksP
 		post.Config = network.Config
 		post.Type = network.Type
 		post.Managed = true
+		// The only config key to ask is 'bridge.external_interfaces',
+		// which is the only one node-specific.
+		key := "bridge.external_interfaces"
 		// Sort config keys to get a stable ordering (especially for tests)
-		keys := []string{}
-		for key := range post.Config {
-			keys = append(keys, key)
-		}
-		sort.Strings(keys)
-		for _, key := range keys {
-			question := fmt.Sprintf(
-				`Enter local value for key "%s" of network "%s": `, key, post.Name)
-			post.Config[key] = cmd.Context.AskString(question, "", nil)
-		}
+		question := fmt.Sprintf(
+			`Enter local value for key "%s" of network "%s": `, key, post.Name)
+		post.Config[key] = cmd.Context.AskString(question, "", nil)
 		networks[i] = post
 	}
 	return networks, nil
diff --git a/lxd/main_init_test.go b/lxd/main_init_test.go
index 873521f38..f364297df 100644
--- a/lxd/main_init_test.go
+++ b/lxd/main_init_test.go
@@ -195,11 +195,8 @@ func (suite *cmdInitTestSuite) TestCmdInit_InteractiveClusteringJoin() {
 		ClusterAcceptFingerprint: true,
 		ClusterConfirmLosingData: true,
 		ClusterConfig: []string{
-			"",               // storage source
-			"10.23.189.2/24", // ipv4.address
-			"true",           // ipv4.nat
-			"aaaa:bbbb:cccc:dddd::1/64", // ipv6.address
-			"true", // ipv6.nat
+			"", // storage source
+			"", // bridge.external_interfaces
 		},
 	}
 	answers.Render(suite.streams)
diff --git a/lxd/util/config.go b/lxd/util/config.go
new file mode 100644
index 000000000..782dca01e
--- /dev/null
+++ b/lxd/util/config.go
@@ -0,0 +1,16 @@
+package util
+
+// CompareConfigs compares two config maps and returns true if they are equal.
+func CompareConfigs(config1, config2 map[string]string) bool {
+	for key, value := range config1 {
+		if config2[key] != value {
+			return false
+		}
+	}
+	for key, value := range config2 {
+		if config1[key] != value {
+			return false
+		}
+	}
+	return true
+}
diff --git a/shared/api/cluster.go b/shared/api/cluster.go
index 1320a1ff4..e68ce5551 100644
--- a/shared/api/cluster.go
+++ b/shared/api/cluster.go
@@ -11,14 +11,16 @@ type Cluster struct {
 //
 // API extension: cluster
 type ClusterPost struct {
-	Name           string `json:"name" yaml:"name"`
-	Address        string `json:"address" yaml:"address"`
-	Schema         int    `json:"schema" yaml:"schema"`
-	API            int    `json:"api" yaml:"api"`
-	TargetAddress  string `json:"target_address" yaml:"target_address"`
-	TargetCert     string `json:"target_cert" yaml:"target_cert"`
-	TargetCA       []byte `json:"target_ca" yaml:"target_ca"`
-	TargetPassword string `json:"target_password" yaml:"target_password"`
+	Name           string        `json:"name" yaml:"name"`
+	Address        string        `json:"address" yaml:"address"`
+	Schema         int           `json:"schema" yaml:"schema"`
+	API            int           `json:"api" yaml:"api"`
+	TargetAddress  string        `json:"target_address" yaml:"target_address"`
+	TargetCert     string        `json:"target_cert" yaml:"target_cert"`
+	TargetCA       []byte        `json:"target_ca" yaml:"target_ca"`
+	TargetPassword string        `json:"target_password" yaml:"target_password"`
+	StoragePools   []StoragePool `json:"storage_pools" yaml:"storage_pools"`
+	Networks       []Network     `json:"networks" yaml:"networks"`
 }
 
 // ClusterNodeAccepted represents the response of a request to join a cluster.

From eea3039f7ccac23a89d9753f5e5494fa43ef1123 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 11 Dec 2017 11:48:02 +0000
Subject: [PATCH 113/116] Show the 'source' pool config key only when targeting
 a node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 client/lxd_storage_pools.go    |  6 +++++-
 doc/api-extensions.md          |  1 +
 lxc/storage.go                 |  5 +++++
 lxd/api_1.0.go                 | 10 +---------
 lxd/cluster/membership.go      | 15 +++++++++++++++
 lxd/cluster/membership_test.go |  4 ++++
 lxd/cluster/resolve.go         | 25 +++++++++++++++++++++++++
 lxd/containers_post.go         | 12 +-----------
 lxd/storage_pools.go           | 32 ++++++++++++++++++++++++++++++++
 test/suites/clustering.sh      |  7 +++++++
 10 files changed, 96 insertions(+), 21 deletions(-)
 create mode 100644 lxd/cluster/resolve.go

diff --git a/client/lxd_storage_pools.go b/client/lxd_storage_pools.go
index 717c0f22d..b4a2a326b 100644
--- a/client/lxd_storage_pools.go
+++ b/client/lxd_storage_pools.go
@@ -52,7 +52,11 @@ func (r *ProtocolLXD) GetStoragePool(name string) (*api.StoragePool, string, err
 	pool := api.StoragePool{}
 
 	// Fetch the raw value
-	etag, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s", url.QueryEscape(name)), nil, "", &pool)
+	path := fmt.Sprintf("/storage-pools/%s", url.QueryEscape(name))
+	if r.targetNode != "" {
+		path += fmt.Sprintf("?targetNode=%s", r.targetNode)
+	}
+	etag, err := r.queryStruct("GET", path, nil, "", &pool)
 	if err != nil {
 		return nil, "", err
 	}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 4d113caae..c2a0eb854 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -389,3 +389,4 @@ The following existing endpoints have been modified:
 
  * `POST /1.0/containers` accepts a new targetNode query parameter
  * `POST /1.0/storage-pools` accepts a new targetNode query parameter
+ * `GET /1.0/storage-pools/<name>` accepts a new targetNode query parameter
diff --git a/lxc/storage.go b/lxc/storage.go
index 29e153bda..2d38bb82b 100644
--- a/lxc/storage.go
+++ b/lxc/storage.go
@@ -759,6 +759,11 @@ func (c *storageCmd) doStoragePoolShow(client lxd.ContainerServer, name string)
 		return errArgs
 	}
 
+	// If a target node was specified, we also return node-specific config values.
+	if c.target != "" {
+		client = client.ClusterTargetNode(c.target)
+	}
+
 	if c.resources {
 		res, err := client.GetStoragePoolResources(name)
 		if err != nil {
diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go
index 1ff4a5b79..b36aa4c0e 100644
--- a/lxd/api_1.0.go
+++ b/lxd/api_1.0.go
@@ -109,15 +109,7 @@ func api10Get(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
-	clustered := false
-	err = d.db.Transaction(func(tx *db.NodeTx) error {
-		addresses, err := tx.RaftNodeAddresses()
-		if err != nil {
-			return err
-		}
-		clustered = len(addresses) > 0
-		return nil
-	})
+	clustered, err := cluster.Enabled(d.db)
 	if err != nil {
 		return SmartError(err)
 	}
diff --git a/lxd/cluster/membership.go b/lxd/cluster/membership.go
index 32a8ad007..b9adc5c9f 100644
--- a/lxd/cluster/membership.go
+++ b/lxd/cluster/membership.go
@@ -498,6 +498,21 @@ func Count(state *state.State) (int, error) {
 	return count, err
 }
 
+// Enabled is a convenience that returns true if clustering is enabled on this
+// node.
+func Enabled(node *db.Node) (bool, error) {
+	enabled := false
+	err := node.Transaction(func(tx *db.NodeTx) error {
+		addresses, err := tx.RaftNodeAddresses()
+		if err != nil {
+			return err
+		}
+		enabled = len(addresses) > 0
+		return nil
+	})
+	return enabled, err
+}
+
 // Check that node-related preconditions are met for bootstrapping or joining a
 // cluster.
 func membershipCheckNodeStateForBootstrapOrJoin(tx *db.NodeTx, address string) error {
diff --git a/lxd/cluster/membership_test.go b/lxd/cluster/membership_test.go
index f38f43c65..d1d86f4eb 100644
--- a/lxd/cluster/membership_test.go
+++ b/lxd/cluster/membership_test.go
@@ -131,6 +131,10 @@ func TestBootstrap(t *testing.T) {
 	count, err := cluster.Count(state)
 	require.NoError(t, err)
 	assert.Equal(t, 1, count)
+
+	enabled, err := cluster.Enabled(state.Node)
+	require.NoError(t, err)
+	assert.True(t, enabled)
 }
 
 // If pre-conditions are not met, a descriptive error is returned.
diff --git a/lxd/cluster/resolve.go b/lxd/cluster/resolve.go
new file mode 100644
index 000000000..6aece834b
--- /dev/null
+++ b/lxd/cluster/resolve.go
@@ -0,0 +1,25 @@
+package cluster
+
+import "github.com/lxc/lxd/lxd/db"
+
+// ResolveTarget is a convenience for handling the value of the ?targetNode
+// query parameter. It returns the address of the given node, or the empty
+// string if the given node is the local one.
+func ResolveTarget(cluster *db.Cluster, target string) (string, error) {
+	address := ""
+	err := cluster.Transaction(func(tx *db.ClusterTx) error {
+		name, err := tx.NodeName()
+		if err != nil {
+			return err
+		}
+		node, err := tx.NodeByName(target)
+		if err != nil {
+			return err
+		}
+		if node.Name != name {
+			address = node.Address
+		}
+		return nil
+	})
+	return address, err
+}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 2541edbf6..6657cd9a2 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -525,17 +525,7 @@ func containersPost(d *Daemon, r *http.Request) Response {
 
 	targetNode := r.FormValue("targetNode")
 	if targetNode != "" {
-		address := ""
-		err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
-			node, err := tx.NodeByName(targetNode)
-			if err != nil {
-				return err
-			}
-			if node.Address != d.endpoints.NetworkAddress() {
-				address = node.Address
-			}
-			return nil
-		})
+		address, err := cluster.ResolveTarget(d.cluster, targetNode)
 		if err != nil {
 			return SmartError(err)
 		}
diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index baba86839..a46e34e0b 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -240,6 +240,38 @@ func storagePoolGet(d *Daemon, r *http.Request) Response {
 	}
 	pool.UsedBy = poolUsedBy
 
+	targetNode := r.FormValue("targetNode")
+
+	// If no target node is specified and this node is clustered, we
+	// omit the node-specific fields, namely "source".
+	clustered, err := cluster.Enabled(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+	if targetNode == "" && clustered {
+		delete(pool.Config, "source")
+	}
+
+	// If a target was specified, forward the request to the relevant node.
+	if targetNode != "" {
+		address, err := cluster.ResolveTarget(d.cluster, targetNode)
+		if err != nil {
+			return SmartError(err)
+		}
+		if address != "" {
+			cert := d.endpoints.NetworkCert()
+			client, err := cluster.Connect(address, cert, true)
+			if err != nil {
+				return SmartError(err)
+			}
+			client = client.ClusterTargetNode(targetNode)
+			pool, _, err = client.GetStoragePool(poolName)
+			if err != nil {
+				return SmartError(err)
+			}
+		}
+	}
+
 	etag := []interface{}{pool.Name, pool.Driver, pool.Config}
 
 	return SyncResponseETag(true, &pool, etag)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 08158711c..e071cbdc7 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -210,6 +210,13 @@ test_clustering_storage() {
   LXD_DIR="${LXD_TWO_DIR}" lxc storage create pool1 dir
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q CREATED
 
+  # The 'source' config key is omitted when showing the cluster
+  # configuration, and included when showing the node-specific one.
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc storage show pool1 | grep -q source
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node1 | grep source | grep -q "$(basename "${LXD_ONE_DIR}")"
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node2 | grep source | grep -q "$(basename "${LXD_TWO_DIR}")"
+
+
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown
   sleep 2

From 98ac1bcff9b9748ffa189cb3724866d97e5fc325 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 11 Dec 2017 12:16:31 +0000
Subject: [PATCH 114/116] Support deleting a pool across all nodes of a cluster

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/storage_pools.go      | 28 ++++++++++++++++++++++++++++
 test/suites/clustering.sh |  3 +++
 2 files changed, 31 insertions(+)

diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go
index a46e34e0b..6b6b98a98 100644
--- a/lxd/storage_pools.go
+++ b/lxd/storage_pools.go
@@ -406,6 +406,34 @@ func storagePoolDelete(d *Daemon, r *http.Request) Response {
 		return InternalError(err)
 	}
 
+	// If this is a cluster notification, we're done, any database work
+	// will be done by the node that is originally serving the request.
+	if isClusterNotification(r) {
+		return EmptySyncResponse
+	}
+
+	// If we are clustered, also notify all other nodes, if any.
+	clustered, err := cluster.Enabled(d.db)
+	if err != nil {
+		return SmartError(err)
+	}
+	if clustered {
+		notifier, err := cluster.NewNotifier(d.State(), d.endpoints.NetworkCert(), cluster.NotifyAll)
+		if err != nil {
+			return SmartError(err)
+		}
+		err = notifier(func(client lxd.ContainerServer) error {
+			_, _, err := client.GetServer()
+			if err != nil {
+				return err
+			}
+			return client.DeleteStoragePool(poolName)
+		})
+		if err != nil {
+			return SmartError(err)
+		}
+	}
+
 	err = dbStoragePoolDeleteAndUpdateCache(d.cluster, poolName)
 	if err != nil {
 		return SmartError(err)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index e071cbdc7..7cb6a3db7 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -216,6 +216,9 @@ test_clustering_storage() {
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node1 | grep source | grep -q "$(basename "${LXD_ONE_DIR}")"
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 --target node2 | grep source | grep -q "$(basename "${LXD_TWO_DIR}")"
 
+  # Delete the storage pool
+  LXD_DIR="${LXD_ONE_DIR}" lxc storage delete pool1
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc storage list | grep -q pool1
 
   LXD_DIR="${LXD_TWO_DIR}" lxd shutdown
   LXD_DIR="${LXD_ONE_DIR}" lxd shutdown

From d4229196cd48ff1861d571b741e6076d720d0ea8 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Mon, 11 Dec 2017 14:55:03 +0000
Subject: [PATCH 115/116] Delete containers from any node

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/container_delete.go   | 26 ++++++++++++++++++++++++++
 test/suites/clustering.sh |  5 +++++
 2 files changed, 31 insertions(+)

diff --git a/lxd/container_delete.go b/lxd/container_delete.go
index c0226f349..499352133 100644
--- a/lxd/container_delete.go
+++ b/lxd/container_delete.go
@@ -5,10 +5,36 @@ import (
 	"net/http"
 
 	"github.com/gorilla/mux"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
 )
 
 func containerDelete(d *Daemon, r *http.Request) Response {
 	name := mux.Vars(r)["name"]
+
+	// Handle requests targeted to a container on a different node
+	var nodeAddress string
+	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
+		var err error
+		nodeAddress, err = tx.ContainerNodeAddress(name)
+		return err
+	})
+	if err != nil {
+		return SmartError(err)
+	}
+	if nodeAddress != "" {
+		cert := d.endpoints.NetworkCert()
+		client, err := cluster.Connect(nodeAddress, cert, false)
+		if err != nil {
+			return SmartError(err)
+		}
+		op, err := client.DeleteContainer(name)
+		if err != nil {
+			return SmartError(err)
+		}
+		return ForwardedOperationResponse(&op.Operation)
+	}
+
 	c, err := containerLoadByName(d.State(), name)
 	if err != nil {
 		return SmartError(err)
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 7cb6a3db7..aba729a4b 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -143,6 +143,11 @@ test_clustering_containers() {
   # A Node: field indicates on which node the container is running
   LXD_DIR="${LXD_ONE_DIR}" lxc info foo | grep -q "Node: node2"
 
+  # Delete the container via node1 and create it again.
+  LXD_DIR="${LXD_ONE_DIR}" lxc delete foo
+  ! LXD_DIR="${LXD_ONE_DIR}" lxc list | grep -q foo
+  LXD_DIR="${LXD_ONE_DIR}" lxc init --target node2 testimage foo
+
   # Start and stop the container via node1
   LXD_DIR="${LXD_ONE_DIR}" lxc start foo
   LXD_DIR="${LXD_TWO_DIR}" lxc info foo | grep -q "Status: Running"

From 0ebfb46b220e041dfe29156034c06bb8abff1c37 Mon Sep 17 00:00:00 2001
From: Free Ekanayaka <free.ekanayaka at canonical.com>
Date: Tue, 12 Dec 2017 08:55:59 +0000
Subject: [PATCH 116/116] Add StoragePool.Nodes field to the API and to lxc
 storage show

Signed-off-by: Free Ekanayaka <free.ekanayaka at canonical.com>
---
 lxd/db/storage_pools.go    | 25 +++++++++++++++++++++++++
 shared/api/storage_pool.go |  3 ++-
 test/suites/clustering.sh  |  2 ++
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index 943b08f90..7391c7418 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -363,9 +363,34 @@ func (c *Cluster) StoragePoolGet(poolName string) (int64, *api.StoragePool, erro
 		storagePool.State = "UNKNOWN"
 	}
 
+	nodes, err := c.storagePoolNodes(poolID)
+	if err != nil {
+		return -1, nil, err
+	}
+	storagePool.Nodes = nodes
+
 	return poolID, &storagePool, nil
 }
 
+// Return the names of the nodes the given pool is defined on.
+func (c *Cluster) storagePoolNodes(poolID int64) ([]string, error) {
+	stmt := `
+SELECT nodes.name FROM nodes
+  JOIN storage_pools_nodes ON storage_pools_nodes.node_id = nodes.id
+  WHERE storage_pools_nodes.storage_pool_id = ?
+`
+	var nodes []string
+	err := c.Transaction(func(tx *ClusterTx) error {
+		var err error
+		nodes, err = query.SelectStrings(tx.tx, stmt, poolID)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return nodes, nil
+}
+
 // Get config of a storage pool.
 func (c *Cluster) StoragePoolConfigGet(poolID int64) (map[string]string, error) {
 	var key, value string
diff --git a/shared/api/storage_pool.go b/shared/api/storage_pool.go
index ac5a2b3cb..614736b94 100644
--- a/shared/api/storage_pool.go
+++ b/shared/api/storage_pool.go
@@ -21,7 +21,8 @@ type StoragePool struct {
 	UsedBy []string `json:"used_by" yaml:"used_by"`
 
 	// API extension: clustering
-	State string `json:"state" yaml:"state"`
+	State string   `json:"state" yaml:"state"`
+	Nodes []string `json:"nodes" yaml:"nodes"`
 }
 
 // StoragePoolPut represents the modifiable fields of a LXD storage pool.
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index aba729a4b..1102a9c61 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -205,6 +205,8 @@ test_clustering_storage() {
 
   # Define storage pools on the two nodes
   LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node1
+  LXD_DIR="${LXD_TWO_DIR}" lxc storage show pool1 | grep -q node1
+  ! LXD_DIR="${LXD_TWO_DIR}" lxc storage show pool1 | grep -q node2
   LXD_DIR="${LXD_ONE_DIR}" lxc storage create pool1 dir --target node2
   LXD_DIR="${LXD_ONE_DIR}" lxc storage show pool1 | grep state: | grep -q PENDING
 


More information about the lxc-devel mailing list